/drivers/ddk/Makefile |
---|
0,0 → 1,68 |
CC = gcc
AS = as

# Driver tree layout: this Makefile lives in drivers/ddk, headers one level up.
DRV_TOPDIR   = $(CURDIR)/..
DRV_INCLUDES = $(DRV_TOPDIR)/include

INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm
DEFINES  = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32

CFLAGS = -c -O2 $(INCLUDES) $(DEFINES) -fomit-frame-pointer -fno-builtin-printf

NAME     := libdrv
CORE_SRC := core.S

NAME_SRCS:= \
	debug/dbglog.c \
	io/create.c \
	io/finfo.c \
	io/ssize.c \
	io/write.c \
	malloc/malloc.c \
	stdio/icompute.c \
	stdio/vsprintf.c \
	stdio/doprnt.c \
	stdio/chartab.c \
	string/_memmove.S \
	string/_strncat.S \
	string/_strncmp.S \
	string/_strncpy.S \
	string/_strnlen.S \
	string/memcpy.S \
	string/memcmp.S \
	string/memset.S \
	string/strcat.S \
	string/strchr.S \
	string/strcpy.S \
	string/strncpy.S \
	string/strncmp.S \
	string/strlen.S

# Map every supported source extension (.c, .S, .asm) onto its .o object.
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
	$(patsubst %.c, %.o, $(NAME_SRCS))))

TARGET = $(NAME).a

.PHONY: all clean

all: $(TARGET) libcore.a

# Fixed typo: the prerequisite was the undefined $(NAME_SRC), which silently
# expanded to nothing; the source list variable is NAME_SRCS.
$(TARGET): $(NAME_OBJS) $(NAME_SRCS) Makefile
	$(AR) cvrs $@ $(NAME_OBJS)

# Build the kernel-import stub library: assemble core.S, then let the linker
# produce the import library (libcore.a) from its .drectve export directives.
libcore.a: $(CORE_SRC) Makefile
	$(AS) -o core.o $<
	$(LD) -shared -s --out-implib $@ --output-def core.def -o core.dll core.o

%.o: %.S Makefile
	$(AS) -o $@ $<

%.o: %.c Makefile
	$(CC) $(CFLAGS) -o $@ $<

clean:
	-rm -f $(NAME_OBJS) core.o core.def core.dll $(TARGET) libcore.a
/drivers/ddk/core.S |
---|
0,0 → 1,141 |
# Kernel-service import stubs for the KolibriOS DDK.
#
# This file exists only to generate an import library (libcore.a): every
# public symbol below is just a label falling through to a single `ret`.
# Drivers link against the import library; at driver load time the loader
# binds the names to the real kernel entry points, so the stub bodies are
# never executed in a loaded driver.
.file "export.s"

.intel_syntax

.text

# One .global per kernel service exported through libcore.
.global _AllocPage
.global _AllocPages
.global _CreateRingBuffer
.global _Delay
.global _DestroyObject
.global _FreeKernelSpace
.global _GetDisplay
.global _GetPgAddr
.global _GetService
.global _KernelAlloc
.global _KernelFree
.global _MapIoMem
.global _PciApi
.global _PciRead16
.global _PciRead32
.global _PciRead8
.global _PciWrite16
.global _PciWrite32
.global _PciWrite8
.global _RegService
.global _SetScreen
.global _SysMsgBoardStr

# COFF symbol records: .scl 2 = external linkage, .type 32 = function.
.def _AllocPage; .scl 2; .type 32; .endef
.def _AllocPages; .scl 2; .type 32; .endef
.def _CreateRingBuffer; .scl 2; .type 32; .endef
.def _Delay; .scl 2; .type 32; .endef
.def _DestroyObject; .scl 2; .type 32; .endef
.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _GetDisplay; .scl 2; .type 32; .endef
.def _GetPgAddr; .scl 2; .type 32; .endef
.def _GetService; .scl 2; .type 32; .endef
.def _KernelAlloc; .scl 2; .type 32; .endef
.def _KernelFree; .scl 2; .type 32; .endef
.def _MapIoMem; .scl 2; .type 32; .endef
.def _PciApi; .scl 2; .type 32; .endef
.def _PciRead16; .scl 2; .type 32; .endef
.def _PciRead32; .scl 2; .type 32; .endef
.def _PciRead8; .scl 2; .type 32; .endef
.def _PciWrite16; .scl 2; .type 32; .endef
.def _PciWrite32; .scl 2; .type 32; .endef
.def _PciWrite8; .scl 2; .type 32; .endef
.def _RegService; .scl 2; .type 32; .endef
.def _SetScreen; .scl 2; .type 32; .endef
.def _SysMsgBoardStr; .scl 2; .type 32; .endef

# All stub labels deliberately share the single `ret` below — the bodies
# are placeholders; real implementations live in the kernel.
_AllocPage:
_AllocPages:
_CreateRingBuffer:
_Delay:
_DestroyObject:
_FreeKernelSpace:
_GetDisplay:
_GetPgAddr:
_GetService:
_KernelAlloc:
_KernelFree:
_MapIoMem:
_PciApi:
_PciRead16:
_PciRead32:
_PciRead8:
_PciWrite16:
_PciWrite32:
_PciWrite8:
_RegService:
_SetScreen:
_SysMsgBoardStr:
                  ret

# Linker directives: each -export makes the symbol (without the leading
# underscore) visible in core.def / the generated import library.
.section .drectve

.ascii " -export:AllocPage"          # gcc ABI
.ascii " -export:AllocPages"         # gcc ABI
.ascii " -export:CreateRingBuffer"   # stdcall
.ascii " -export:Delay"              # stdcall
.ascii " -export:DestroyObject"
.ascii " -export:FreeKernelSpace"    # stdcall
.ascii " -export:GetDisplay"         # stdcall
.ascii " -export:GetPgAddr"          # stdcall
.ascii " -export:GetService"         # stdcall
.ascii " -export:KernelAlloc"        # stdcall
.ascii " -export:KernelFree"         # stdcall
.ascii " -export:MapIoMem"           # stdcall
.ascii " -export:PciApi"             #
.ascii " -export:PciRead16"          # stdcall
.ascii " -export:PciRead32"          # stdcall
.ascii " -export:PciRead8"           # stdcall
.ascii " -export:PciWrite16"         # stdcall
.ascii " -export:PciWrite32"         # stdcall
.ascii " -export:PciWrite8"          # stdcall
.ascii " -export:RegService"         # stdcall
.ascii " -export:SetScreen"          # stdcall
.ascii " -export:SysMsgBoardStr"     # stdcall
/drivers/ddk/debug/dbglog.c |
---|
0,0 → 1,186 |
#include <types.h>
#include <syscall.h>

#pragma pack(push, 1)

/* Packed time stamp as stored in the OS file-information block. */
typedef struct
{
  char sec;
  char min;
  char hour;
  char rsv;                     /* reserved/padding byte */
}detime_t;

/* Packed date stamp as stored in the OS file-information block. */
typedef struct
{
  char day;
  char month;
  short year;
}dedate_t;

/* File attributes returned by the file-info syscall (see io/finfo.c, which
   duplicates this layout — NOTE(review): the two copies must stay in sync;
   moving this to a shared header would be safer). Each union exposes a
   time/date both as a decoded struct and as the raw 32-bit word. */
typedef struct
{
  unsigned    attr;             /* attribute bit mask */
  unsigned    flags;
  union
  {
     detime_t  ctime;           /* creation time, decoded */
     unsigned  cr_time;         /* creation time, raw dword */
  };
  union
  {
     dedate_t  cdate;           /* creation date, decoded */
     unsigned  cr_date;         /* creation date, raw dword */
  };
  union
  {
     detime_t  atime;           /* last-access time */
     unsigned  acc_time;
  };
  union
  {
     dedate_t  adate;           /* last-access date */
     unsigned  acc_date;
  };
  union
  {
     detime_t  mtime;           /* last-modification time */
     unsigned  mod_time;
  };
  union
  {
     dedate_t  mdate;           /* last-modification date */
     unsigned  mod_date;
  };
  unsigned    size;             /* file size, low dword */
  unsigned    size_high;        /* file size, high dword */
} FILEINFO;

#pragma pack(pop)

/* Debug log sink: path stays NULL until dbg_open() succeeds; offset is the
   current append position advanced by each write. */
typedef struct
{
  char *path;
  int   offset;
} dbgfile_t;

static dbgfile_t dbgfile;

/* Minimal <stdarg.h> replacement built directly on the GCC builtins
   (the DDK is built freestanding, without the host C headers). */
#define va_start(v,l)   __builtin_va_start(v,l)
#define va_end(v)       __builtin_va_end(v)
#define va_arg(v,l)     __builtin_va_arg(v,l)
#define __va_copy(d,s)  __builtin_va_copy(d,s)

typedef __builtin_va_list  __gnuc_va_list;
typedef __gnuc_va_list     va_list;

#define arg(x) va_arg (ap, u32_t)
int dbg_open(char *path) |
{ |
FILEINFO info; |
dbgfile.offset = 0; |
if(get_fileinfo(path,&info)) |
{ |
if(!create_file(path)) |
{ |
dbgfile.path = path; |
return true; |
} |
else return false; |
}; |
set_file_size(path, 0); |
dbgfile.path = path; |
dbgfile.offset = 0; |
return true; |
}; |
int vsnprintf(char *s, size_t n, const char *format, va_list arg); |
int printf(const char* format, ...) |
{ |
char txtbuf[256]; |
int len = 0; |
va_list ap; |
va_start(ap, format); |
if (format) |
len = vsnprintf(txtbuf, 256, format, ap); |
va_end(ap); |
if( len ) |
SysMsgBoardStr(txtbuf); |
return len; |
} |
int dbgprintf(const char* format, ...) |
{ |
char txtbuf[256]; |
unsigned writes; |
int len = 0; |
va_list ap; |
va_start(ap, format); |
if (format) |
len = vsnprintf(txtbuf, 256, format, ap); |
va_end(ap); |
if( len ) |
{ |
SysMsgBoardStr(txtbuf); |
if(dbgfile.path) |
{ |
write_file(dbgfile.path,txtbuf,dbgfile.offset,len,&writes); |
dbgfile.offset+=writes; |
}; |
}; |
return len; |
} |
int xf86DrvMsg(int skip, int code, const char* format, ...) |
{ |
char txtbuf[256]; |
unsigned writes; |
va_list ap; |
int len = 0; |
va_start(ap, format); |
if (format) |
len = vsnprintf(txtbuf, 256, format, ap); |
va_end(ap); |
if( len ) |
{ |
SysMsgBoardStr(txtbuf); |
if(dbgfile.path) |
{ |
write_file(dbgfile.path,txtbuf,dbgfile.offset,len,&writes); |
dbgfile.offset+=writes; |
}; |
}; |
return len; |
} |
/*
 * snprintf: thin variadic wrapper over this library's vsnprintf.
 * Formats into s (capacity n) and returns vsnprintf's result.
 */
int snprintf(char *s, size_t n, const char *format, ...)
{
    int written;
    va_list args;

    va_start(args, format);
    written = vsnprintf(s, n, format, args);
    va_end(args);

    return written;
}
/drivers/ddk/io/create.c |
---|
0,0 → 1,22 |
/*
 * create_file: create (or overwrite as empty) a file via KolibriOS
 * syscall 70, subfunction 2.
 *
 * The 28 bytes pushed on the stack form the syscall information block
 * (ebx points at it): +0 subfunction, +4..+19 zeroed parameters, +20 a
 * zero byte, +21 the pointer to the path string (hence the odd 1(%esp)
 * store performed right after the two top pushes).
 *
 * Returns the syscall status in eax (0 on success).
 */
int create_file(const char *path)
{
    int retval;

    __asm__ __volatile__ (
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "movl %1, 1(%%esp) \n\t"   /* FIX: was %0 — that is the *output*
                                      operand (eax, uninitialized here), so a
                                      garbage pointer was stored instead of
                                      path; %1 is the path input. */
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl $2 \n\t"            /* subfunction 2 = create file */
        "movl %%esp, %%ebx \n\t"   /* ebx -> information block */
        "movl $70, %%eax \n\t"     /* function 70 = file system access */
        "int $0x40 \n\t"
        "addl $28, %%esp \n\t"
        :"=a" (retval)
        :"r" (path)
        :"ebx", "memory");

    return retval;
}
/drivers/ddk/io/finfo.c |
---|
0,0 → 1,81 |
#pragma pack(push, 1)

/* Packed time stamp as stored in the OS file-information block. */
typedef struct
{
  char sec;
  char min;
  char hour;
  char rsv;                     /* reserved/padding byte */
}detime_t;

/* Packed date stamp as stored in the OS file-information block. */
typedef struct
{
  char day;
  char month;
  short year;
}dedate_t;

/* File attributes filled in by syscall 70/5 (see get_fileinfo below).
   NOTE(review): this layout is duplicated in debug/dbglog.c — keep both
   copies in sync, or hoist into a shared header. Each union exposes a
   stamp both decoded and as the raw 32-bit word. */
typedef struct
{
  unsigned    attr;             /* attribute bit mask */
  unsigned    flags;
  union
  {
     detime_t  ctime;           /* creation time, decoded */
     unsigned  cr_time;         /* creation time, raw dword */
  };
  union
  {
     dedate_t  cdate;           /* creation date, decoded */
     unsigned  cr_date;         /* creation date, raw dword */
  };
  union
  {
     detime_t  atime;           /* last-access time */
     unsigned  acc_time;
  };
  union
  {
     dedate_t  adate;           /* last-access date */
     unsigned  acc_date;
  };
  union
  {
     detime_t  mtime;           /* last-modification time */
     unsigned  mod_time;
  };
  union
  {
     dedate_t  mdate;           /* last-modification date */
     unsigned  mod_date;
  };
  unsigned    size;             /* file size, low dword */
  unsigned    size_high;        /* file size, high dword */
} FILEINFO;

#pragma pack(pop)
/*
 * get_fileinfo: query file attributes via KolibriOS syscall 70,
 * subfunction 5, filling *info with the packed FILEINFO block.
 *
 * Stack layout of the 28-byte information block (ebx points at it):
 * +0 subfunction 5, +4..+15 zeroed, +16 pointer to the result buffer
 * (pushed from ebx while it still holds info), +20 a zero byte,
 * +21 pointer to the path string (stored via the odd 1(%esp) write).
 *
 * Returns the syscall status (0 on success, non-zero e.g. when the
 * file does not exist).
 */
int get_fileinfo(const char *path, FILEINFO *info)
{
    int retval;

    asm __volatile__
    (
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "movl %1, 1(%%esp) \n\t"   /* FIX: was %0 — the *output* operand
                                      (eax, uninitialized at this point), so
                                      a garbage pointer was stored instead of
                                      path; %1 is the path input. */
        "pushl %%ebx \n\t"         /* +16: result buffer (ebx == info here) */
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl $5 \n\t"            /* subfunction 5 = get file info */
        "movl %%esp, %%ebx \n\t"   /* ebx -> information block */
        "movl $70, %%eax \n\t"     /* function 70 = file system access */
        "int $0x40 \n\t"
        "addl $28, %%esp \n\t"
        :"=a" (retval)
        :"r" (path), "b" (info)
        :"memory"
    );
    return retval;
}
/drivers/ddk/io/ssize.c |
---|
0,0 → 1,21 |
/*
 * set_file_size: truncate or extend a file to `size` bytes via KolibriOS
 * syscall 70, subfunction 4.
 *
 * The 28 bytes pushed form the information block (ebx points at it):
 * +0 subfunction 4, +4 new size (pushed from ebx, which carries `size`
 * via the "b" constraint), +8..+19 zeroed, +20 a zero byte, +21 pointer
 * to the path string.
 *
 * NOTE: the `movl %0, 1(%%esp)` below deliberately names the *output*
 * operand — it prints as %eax, which at that point still holds `path`
 * because of the matching "a"(path) input constraint. Fragile but
 * intentional; do not "fix" it to %1 without also changing constraints.
 *
 * Returns the syscall status (0 on success).
 */
int set_file_size(const char *path, unsigned size)
{
    int retval;

    __asm__ __volatile__(
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "movl %0, 1(%%esp) \n\t"   /* path pointer at +21 (see note above) */
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "pushl %%ebx \n\t"         /* +4: new file size */
        "push $4  \n\t"            /* subfunction 4 = set end of file */
        "movl %%esp, %%ebx \n\t"   /* ebx -> information block */
        "movl $70, %%eax \n\t"     /* function 70 = file system access */
        "int $0x40 \n\t"
        "addl $28, %%esp \n\t"
        :"=a" (retval)
        :"a" (path), "b" (size));
    return retval;
}
/drivers/ddk/io/write.c |
---|
0,0 → 1,26 |
/*
 * write_file: write `count` bytes from `buff` at byte `offset` of the file
 * `path`, via KolibriOS syscall 70, subfunction 3.
 *
 * Information block pushed on the stack (ebx points at it):
 * +0 subfunction 3, +4 offset (from ecx), +8 zero (offset high dword),
 * +12 byte count (from edx), +16 source buffer (from ebx), +20 zero byte,
 * +21 pointer to the path string (stored via the 1(%esp) write while
 * eax still holds `path` from the "a" input constraint).
 *
 * On return the syscall leaves the status in eax and the number of bytes
 * actually written in ebx; the latter is stored through `writes` when the
 * pointer is non-NULL.
 *
 * Returns the syscall status (0 on success).
 */
int write_file(const char *path,const void *buff,
               unsigned offset,unsigned count,unsigned *writes)
{
    int retval;

    __asm__ __volatile__(
        "pushl $0 \n\t"
        "pushl $0 \n\t"
        "movl %%eax, 1(%%esp) \n\t"  /* path pointer at +21 */
        "pushl %%ebx \n\t"           /* +16: source buffer */
        "pushl %%edx \n\t"           /* +12: byte count */
        "pushl $0 \n\t"              /* +8: offset, high dword */
        "pushl %%ecx \n\t"           /* +4: offset, low dword */
        "pushl $3 \n\t"              /* +0: subfunction 3 = write */
        "movl %%esp, %%ebx \n\t"     /* ebx -> information block */
        "mov $70, %%eax \n\t"        /* function 70 = file system access */
        "int $0x40 \n\t"
        "testl %%esi, %%esi \n\t"    /* writes == NULL? then skip store */
        "jz 1f \n\t"
        "movl %%ebx, (%%esi) \n\t"   /* *writes = bytes actually written */
        "1:"
        "addl $28, %%esp \n\t"
        :"=a" (retval)
        :"a"(path),"b"(buff),"c"(offset),"d"(count),"S"(writes));
    return retval;
}
/drivers/ddk/malloc/malloc.c |
---|
0,0 → 1,3992 |
/* |
This is a version (aka dlmalloc) of malloc/free/realloc written by |
Doug Lea and released to the public domain, as explained at |
http://creativecommons.org/licenses/publicdomain. Send questions, |
comments, complaints, performance data, etc to dl@cs.oswego.edu |
* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) |
Note: There may be an updated version of this malloc obtainable at |
ftp://gee.cs.oswego.edu/pub/misc/malloc.c |
Check before installing! |
* Quickstart |
This library is all in one file to simplify the most common usage: |
ftp it, compile it (-O3), and link it into another program. All of |
the compile-time options default to reasonable values for use on |
most platforms. You might later want to step through various |
compile-time and dynamic tuning options. |
For convenience, an include file for code using this malloc is at: |
ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h |
You don't really need this .h file unless you call functions not |
defined in your system include files. The .h file contains only the |
excerpts from this file needed for using this malloc on ANSI C/C++ |
systems, so long as you haven't changed compile-time options about |
naming and tuning parameters. If you do, then you can create your |
own malloc.h that does include all settings by cutting at the point |
indicated below. Note that you may already by default be using a C |
library containing a malloc that is based on some version of this |
malloc (for example in linux). You might still want to use the one |
in this file to customize settings or to avoid overheads associated |
with library versions. |
* Vital statistics: |
Supported pointer/size_t representation: 4 or 8 bytes |
size_t MUST be an unsigned type of the same width as |
pointers. (If you are using an ancient system that declares |
size_t as a signed type, or need it to be a different width |
than pointers, you can use a previous release of this malloc |
(e.g. 2.7.2) supporting these.) |
Alignment: 8 bytes (default) |
This suffices for nearly all current machines and C compilers. |
However, you can define MALLOC_ALIGNMENT to be wider than this |
if necessary (up to 128bytes), at the expense of using more space. |
Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) |
8 or 16 bytes (if 8byte sizes) |
Each malloced chunk has a hidden word of overhead holding size |
and status information, and additional cross-check word |
if FOOTERS is defined. |
Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) |
8-byte ptrs: 32 bytes (including overhead) |
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
The maximum overhead wastage (i.e., number of extra bytes |
allocated than were requested in malloc) is less than or equal |
to the minimum size, except for requests >= mmap_threshold that |
are serviced via mmap(), where the worst case wastage is about |
32 bytes plus the remainder from a system page (the minimal |
mmap unit); typically 4096 or 8192 bytes. |
Security: static-safe; optionally more or less |
The "security" of malloc refers to the ability of malicious |
code to accentuate the effects of errors (for example, freeing |
space that is not currently malloc'ed or overwriting past the |
ends of chunks) in code that calls malloc. This malloc |
guarantees not to modify any memory locations below the base of |
heap, i.e., static variables, even in the presence of usage |
errors. The routines additionally detect most improper frees |
and reallocs. All this holds as long as the static bookkeeping |
for malloc itself is not corrupted by some other means. This |
is only one aspect of security -- these checks do not, and |
cannot, detect all possible programming errors. |
If FOOTERS is defined nonzero, then each allocated chunk |
carries an additional check word to verify that it was malloced |
from its space. These check words are the same within each |
execution of a program using malloc, but differ across |
executions, so externally crafted fake chunks cannot be |
freed. This improves security by rejecting frees/reallocs that |
could corrupt heap memory, in addition to the checks preventing |
writes to statics that are always on. This may further improve |
security at the expense of time and space overhead. (Note that |
FOOTERS may also be worth using with MSPACES.) |
By default detected errors cause the program to abort (calling |
"abort()"). You can override this to instead proceed past |
errors by defining PROCEED_ON_ERROR. In this case, a bad free |
has no effect, and a malloc that encounters a bad address |
caused by user overwrites will ignore the bad address by |
dropping pointers and indices to all known memory. This may |
be appropriate for programs that should continue if at all |
possible in the face of programming errors, although they may |
run out of memory because dropped memory is never reclaimed. |
If you don't like either of these options, you can define |
CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything |
else. And if if you are sure that your program using malloc has |
no errors or vulnerabilities, you can define INSECURE to 1, |
which might (or might not) provide a small performance improvement. |
Thread-safety: NOT thread-safe unless USE_LOCKS defined |
When USE_LOCKS is defined, each public call to malloc, free, |
etc is surrounded with either a pthread mutex or a win32 |
spinlock (depending on WIN32). This is not especially fast, and |
can be a major bottleneck. It is designed only to provide |
minimal protection in concurrent environments, and to provide a |
basis for extensions. If you are using malloc in a concurrent |
program, consider instead using ptmalloc, which is derived from |
a version of this malloc. (See http://www.malloc.de). |
System requirements: Any combination of MORECORE and/or MMAP/MUNMAP |
This malloc can use unix sbrk or any emulation (invoked using |
the CALL_MORECORE macro) and/or mmap/munmap or any emulation |
(invoked using CALL_MMAP/CALL_MUNMAP) to get and release system |
memory. On most unix systems, it tends to work best if both |
MORECORE and MMAP are enabled. On Win32, it uses emulations |
based on VirtualAlloc. It also uses common C library functions |
like memset. |
Compliance: I believe it is compliant with the Single Unix Specification |
(See http://www.unix.org). Also SVID/XPG, ANSI C, and probably |
others as well. |
* Overview of algorithms |
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and |
tunable. Consistent balance across these factors results in a good |
general-purpose allocator for malloc-intensive programs. |
In most ways, this malloc is a best-fit allocator. Generally, it |
chooses the best-fitting existing chunk for a request, with ties |
broken in approximately least-recently-used order. (This strategy |
normally maintains low fragmentation.) However, for requests less |
than 256bytes, it deviates from best-fit when there is not an |
exactly fitting available chunk by preferring to use space adjacent |
to that used for the previous small request, as well as by breaking |
ties in approximately most-recently-used order. (These enhance |
locality of series of small allocations.) And for very large requests |
(>= 256Kb by default), it relies on system memory mapping |
facilities, if supported. (This helps avoid carrying around and |
possibly fragmenting memory used only for large chunks.) |
All operations (except malloc_stats and mallinfo) have execution |
times that are bounded by a constant factor of the number of bits in |
a size_t, not counting any clearing in calloc or copying in realloc, |
or actions surrounding MORECORE and MMAP that have times |
proportional to the number of non-contiguous regions returned by |
system allocation routines, which is often just 1. |
The implementation is not very modular and seriously overuses |
macros. Perhaps someday all C compilers will do as good a job |
inlining modular code as can now be done by brute-force expansion, |
but now, enough of them seem not to. |
Some compilers issue a lot of warnings about code that is |
dead/unreachable only on some platforms, and also about intentional |
uses of negation on unsigned types. All known cases of each can be |
ignored. |
For a longer but out of date high-level description, see |
http://gee.cs.oswego.edu/dl/html/malloc.html |
* MSPACES |
If MSPACES is defined, then in addition to malloc, free, etc., |
this file also defines mspace_malloc, mspace_free, etc. These |
are versions of malloc routines that take an "mspace" argument |
obtained using create_mspace, to control all internal bookkeeping. |
If ONLY_MSPACES is defined, only these versions are compiled. |
So if you would like to use this allocator for only some allocations, |
and your system malloc for others, you can compile with |
ONLY_MSPACES and then do something like... |
static mspace mymspace = create_mspace(0,0); // for example |
#define mymalloc(bytes) mspace_malloc(mymspace, bytes) |
(Note: If you only need one instance of an mspace, you can instead |
use "USE_DL_PREFIX" to relabel the global malloc.) |
You can similarly create thread-local allocators by storing |
mspaces as thread-locals. For example: |
static __thread mspace tlms = 0; |
void* tlmalloc(size_t bytes) { |
if (tlms == 0) tlms = create_mspace(0, 0); |
return mspace_malloc(tlms, bytes); |
} |
void tlfree(void* mem) { mspace_free(tlms, mem); } |
Unless FOOTERS is defined, each mspace is completely independent. |
You cannot allocate from one and free to another (although |
conformance is only weakly checked, so usage errors are not always |
caught). If FOOTERS is defined, then each chunk carries around a tag |
indicating its originating mspace, and frees are directed to their |
originating spaces. |
------------------------- Compile-time options --------------------------- |
Be careful in setting #define values for numerical constants of type |
size_t. On some systems, literal values are not automatically extended |
to size_t precision unless they are explicitly casted. |
WIN32 default: defined if _WIN32 defined |
Defining WIN32 sets up defaults for MS environment and compilers. |
Otherwise defaults are for unix. |
MALLOC_ALIGNMENT default: (size_t)8 |
Controls the minimum alignment for malloc'ed chunks. It must be a |
power of two and at least 8, even on machines for which smaller |
alignments would suffice. It may be defined as larger than this |
though. Note however that code and data structures are optimized for |
the case of 8-byte alignment. |
MSPACES default: 0 (false) |
If true, compile in support for independent allocation spaces. |
This is only supported if HAVE_MMAP is true. |
ONLY_MSPACES default: 0 (false) |
If true, only compile in mspace versions, not regular versions. |
USE_LOCKS default: 0 (false) |
Causes each call to each public routine to be surrounded with |
pthread or WIN32 mutex lock/unlock. (If set true, this can be |
overridden on a per-mspace basis for mspace versions.) |
FOOTERS default: 0 |
If true, provide extra checking and dispatching by placing |
information in the footers of allocated chunks. This adds |
space and time overhead. |
INSECURE default: 0 |
If true, omit checks for usage errors and heap space overwrites. |
USE_DL_PREFIX default: NOT defined |
Causes compiler to prefix all public routines with the string 'dl'. |
This can be useful when you only want to use this malloc in one part |
of a program, using your regular system malloc elsewhere. |
ABORT default: defined as abort() |
Defines how to abort on failed checks. On most systems, a failed |
check cannot die with an "assert" or even print an informative |
message, because the underlying print routines in turn call malloc, |
which will fail again. Generally, the best policy is to simply call |
abort(). It's not very useful to do more than this because many |
errors due to overwriting will show up as address faults (null, odd |
addresses etc) rather than malloc-triggered checks, so will also |
abort. Also, most compilers know that abort() does not return, so |
can better optimize code conditionally calling it. |
PROCEED_ON_ERROR default: defined as 0 (false) |
Controls whether detected bad addresses cause them to bypassed |
rather than aborting. If set, detected bad arguments to free and |
realloc are ignored. And all bookkeeping information is zeroed out |
upon a detected overwrite of freed heap space, thus losing the |
ability to ever return it from malloc again, but enabling the |
application to proceed. If PROCEED_ON_ERROR is defined, the |
static variable malloc_corruption_error_count is compiled in |
and can be examined to see if errors have occurred. This option |
generates slower code than the default abort policy. |
DEBUG default: NOT defined |
The DEBUG setting is mainly intended for people trying to modify |
this code or diagnose problems when porting to new platforms. |
However, it may also be able to better isolate user errors than just |
using runtime checks. The assertions in the check routines spell |
out in more detail the assumptions and invariants underlying the |
algorithms. The checking is fairly extensive, and will slow down |
execution noticeably. Calling malloc_stats or mallinfo with DEBUG |
set will attempt to check every non-mmapped allocated and free chunk |
in the course of computing the summaries. |
ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) |
Debugging assertion failures can be nearly impossible if your |
version of the assert macro causes malloc to be called, which will |
lead to a cascade of further failures, blowing the runtime stack. |
ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), |
which will usually make debugging easier. |
MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 |
The action to take before "return 0" when malloc fails to be able to |
return memory because there is none available. |
HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES |
True if this system supports sbrk or an emulation of it. |
MORECORE default: sbrk |
The name of the sbrk-style system routine to call to obtain more |
memory. See below for guidance on writing custom MORECORE |
functions. The type of the argument to sbrk/MORECORE varies across |
systems. It cannot be size_t, because it supports negative |
arguments, so it is normally the signed type of the same width as |
size_t (sometimes declared as "intptr_t"). It doesn't much matter |
though. Internally, we only call it with arguments less than half |
the max value of a size_t, which should work across all reasonable |
possibilities, although sometimes generating compiler warnings. See |
near the end of this file for guidelines for creating a custom |
version of MORECORE. |
MORECORE_CONTIGUOUS default: 1 (true) |
If true, take advantage of fact that consecutive calls to MORECORE |
with positive arguments always return contiguous increasing |
addresses. This is true of unix sbrk. It does not hurt too much to |
set it true anyway, since malloc copes with non-contiguities. |
Setting it false when definitely non-contiguous saves time |
and possibly wasted space it would take to discover this though. |
MORECORE_CANNOT_TRIM default: NOT defined |
True if MORECORE cannot release space back to the system when given |
negative arguments. This is generally necessary only if you are |
using a hand-crafted MORECORE function that cannot handle negative |
arguments. |
HAVE_MMAP default: 1 (true) |
True if this system supports mmap or an emulation of it. If so, and |
HAVE_MORECORE is not true, MMAP is used for all system |
allocation. If set and HAVE_MORECORE is true as well, MMAP is |
primarily used to directly allocate very large blocks. It is also |
used as a backup strategy in cases where MORECORE fails to provide |
space from system. Note: A single call to MUNMAP is assumed to be |
able to unmap memory that may have be allocated using multiple calls |
to MMAP, so long as they are adjacent. |
HAVE_MREMAP default: 1 on linux, else 0 |
If true realloc() uses mremap() to re-allocate large blocks and |
extend or shrink allocation spaces. |
MMAP_CLEARS default: 1 on unix |
True if mmap clears memory so calloc doesn't need to. This is true |
for standard unix mmap using /dev/zero. |
USE_BUILTIN_FFS default: 0 (i.e., not used) |
Causes malloc to use the builtin ffs() function to compute indices. |
Some compilers may recognize and intrinsify ffs to be faster than the |
supplied C version. Also, the case of x86 using gcc is special-cased |
to an asm instruction, so is already as fast as it can be, and so |
this setting has no effect. (On most x86s, the asm version is only |
slightly faster than the C version.) |
malloc_getpagesize default: derive from system includes, or 4096. |
The system page size. To the extent possible, this malloc manages |
memory from the system in page-size units. This may be (and |
usually is) a function rather than a constant. This is ignored |
if WIN32, where page size is determined using getSystemInfo during |
initialization. |
USE_DEV_RANDOM default: 0 (i.e., not used) |
Causes malloc to use /dev/random to initialize secure magic seed for |
stamping footers. Otherwise, the current time is used. |
NO_MALLINFO default: 0 |
If defined, don't compile "mallinfo". This can be a simple way |
of dealing with mismatches between system declarations and |
those in this file. |
MALLINFO_FIELD_TYPE default: size_t |
The type of the fields in the mallinfo struct. This was originally |
defined as "int" in SVID etc, but is more usefully defined as |
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set |
REALLOC_ZERO_BYTES_FREES default: not defined |
This should be set if a call to realloc with zero bytes should |
be the same as a call to free. Some people think it should. Otherwise, |
since this malloc returns a unique pointer for malloc(0), so does |
realloc(p, 0). |
LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H |
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H |
LACKS_STDLIB_H default: NOT defined unless on WIN32 |
Define these if your system does not have these header files. |
You might need to manually insert some of the declarations they provide. |
DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, |
system_info.dwAllocationGranularity in WIN32, |
otherwise 64K. |
Also settable using mallopt(M_GRANULARITY, x) |
The unit for allocating and deallocating memory from the system. On |
most systems with contiguous MORECORE, there is no reason to |
make this more than a page. However, systems with MMAP tend to |
either require or encourage larger granularities. You can increase |
this value to prevent system allocation functions to be called so |
often, especially if they are slow. The value must be at least one |
page and must be a power of two. Setting to 0 causes initialization |
to either page size or win32 region size. (Note: In previous |
versions of malloc, the equivalent of this option was called |
"TOP_PAD") |
DEFAULT_TRIM_THRESHOLD default: 2MB |
Also settable using mallopt(M_TRIM_THRESHOLD, x) |
The maximum amount of unused top-most memory to keep before |
releasing via malloc_trim in free(). Automatic trimming is mainly |
useful in long-lived programs using contiguous MORECORE. Because |
trimming via sbrk can be slow on some systems, and can sometimes be |
wasteful (in cases where programs immediately afterward allocate |
more large chunks) the value should be high enough so that your |
overall system performance would improve by releasing this much |
memory. As a rough guide, you might set to a value close to the |
average size of a process (program) running on your system. |
Releasing this much memory would allow such a process to run in |
memory. Generally, it is worth tuning trim thresholds when a |
program undergoes phases where several large chunks are allocated |
and released in ways that can reuse each other's storage, perhaps |
mixed with phases where there are no such chunks at all. The trim |
value must be greater than page size to have any useful effect. To |
disable trimming completely, you can set to MAX_SIZE_T. Note that the trick |
some people use of mallocing a huge space and then freeing it at |
program startup, in an attempt to reserve system memory, doesn't |
have the intended effect under automatic trimming, since that memory |
will immediately be returned to the system. |
DEFAULT_MMAP_THRESHOLD default: 256K |
Also settable using mallopt(M_MMAP_THRESHOLD, x) |
The request size threshold for using MMAP to directly service a |
request. Requests of at least this size that cannot be allocated |
using already-existing space will be serviced via mmap. (If enough |
normal freed space already exists it is used instead.) Using mmap |
segregates relatively large chunks of memory so that they can be |
individually obtained and released from the host system. A request |
serviced through mmap is never reused by any other request (at least |
not directly; the system may just so happen to remap successive |
requests to the same locations). Segregating space in this way has |
the benefits that: Mmapped space can always be individually released |
back to the system, which helps keep the system level memory demands |
of a long-lived program low. Also, mapped memory doesn't become |
`locked' between other chunks, as can happen with normally allocated |
chunks, which means that even trimming via malloc_trim would not |
release them. However, it has the disadvantage that the space |
cannot be reclaimed, consolidated, and then used to service later |
requests, as happens with normal chunks. The advantages of mmap |
nearly always outweigh disadvantages for "large" chunks, but the |
value of "large" may vary across systems. The default is an |
empirically derived value that works well in most systems. You can |
disable mmap by setting to MAX_SIZE_T. |
*/ |
/* ---- KolibriOS port configuration for dlmalloc ----
   System memory is obtained from the kernel allocator (KernelAlloc /
   KernelFree) instead of sbrk/mmap; the CALL_MMAP/CALL_MUNMAP plumbing
   further down presumably maps onto these — confirm against the rest of
   this file. */
#ifdef KOLIBRI
#define IMPORT __attribute__ ((stdcall)) __attribute__ ((dllimport))

/* Kernel imports bound through libcore's import stubs (see core.S). */
void* IMPORT  KernelAlloc(unsigned size)__asm__("KernelAlloc");
void  IMPORT  KernelFree(void *mem)__asm__("KernelFree");

#else
#define IMPORT __attribute__ ((dllimport))

void* __fastcall IMPORT mem_alloc(unsigned size, unsigned flags)__asm__("MemAlloc");
void  __fastcall IMPORT mem_free(void *mem)__asm__("MemFree");
#endif

#define MALLOC_ALIGNMENT ((size_t)8U)
/* Lowered from dlmalloc's 256K default: requests >= 32K go straight to
   the kernel allocator. */
#define DEFAULT_MMAP_THRESHOLD ((size_t)32U * (size_t)1024U)
#define NO_MALLINFO  1
#define MORECORE_CANNOT_TRIM
#define FOOTERS 0
/* Deliberately empty: a failed internal check does nothing — there is no
   abort() in the kernel environment. This also makes the later
   `#ifndef ABORT / #define ABORT abort()` guard a no-op. */
#define ABORT
#undef  WIN32
#undef  _WIN32

/* Freestanding build, no host headers: declare size_t ourselves.
   Assumes a 32-bit target where pointers are 4 bytes wide (matches
   CONFIG_X86_32 in the Makefile). */
typedef unsigned int size_t;

#define HAVE_MMAP 1
#define HAVE_MORECORE 0
/* Host C headers are unavailable in the kernel build. */
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
/* No errno in this environment: a failed allocation simply returns 0. */
#define MALLOC_FAILURE_ACTION
#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
#if defined(DARWIN) || defined(_DARWIN) |
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ |
#ifndef HAVE_MORECORE |
#define HAVE_MORECORE 0 |
#define HAVE_MMAP 1 |
#endif /* HAVE_MORECORE */ |
#endif /* DARWIN */ |
#ifndef LACKS_SYS_TYPES_H |
#include <sys/types.h> /* For size_t */ |
#endif /* LACKS_SYS_TYPES_H */ |
/* The maximum possible size_t value has all bits set */ |
#define MAX_SIZE_T (~(size_t)0) |
#ifndef ONLY_MSPACES |
#define ONLY_MSPACES 0 |
#endif /* ONLY_MSPACES */ |
#ifndef MSPACES |
#if ONLY_MSPACES |
#define MSPACES 1 |
#else /* ONLY_MSPACES */ |
#define MSPACES 0 |
#endif /* ONLY_MSPACES */ |
#endif /* MSPACES */ |
#ifndef MALLOC_ALIGNMENT |
#define MALLOC_ALIGNMENT ((size_t)8U) |
#endif /* MALLOC_ALIGNMENT */ |
#ifndef FOOTERS |
#define FOOTERS 0 |
#endif /* FOOTERS */ |
#ifndef ABORT |
#define ABORT abort() |
#endif /* ABORT */ |
#ifndef ABORT_ON_ASSERT_FAILURE |
#define ABORT_ON_ASSERT_FAILURE 1 |
#endif /* ABORT_ON_ASSERT_FAILURE */ |
#ifndef PROCEED_ON_ERROR |
#define PROCEED_ON_ERROR 0 |
#endif /* PROCEED_ON_ERROR */ |
#ifndef USE_LOCKS |
#define USE_LOCKS 0 |
#endif /* USE_LOCKS */ |
#ifndef INSECURE |
#define INSECURE 0 |
#endif /* INSECURE */ |
#ifndef HAVE_MMAP |
#define HAVE_MMAP 1 |
#endif /* HAVE_MMAP */ |
#ifndef MMAP_CLEARS |
#define MMAP_CLEARS 1 |
#endif /* MMAP_CLEARS */ |
#ifndef HAVE_MREMAP |
#ifdef linux |
#define HAVE_MREMAP 1 |
#else /* linux */ |
#define HAVE_MREMAP 0 |
#endif /* linux */ |
#endif /* HAVE_MREMAP */ |
#ifndef MALLOC_FAILURE_ACTION |
#define MALLOC_FAILURE_ACTION errno = ENOMEM; |
#endif /* MALLOC_FAILURE_ACTION */ |
#ifndef HAVE_MORECORE |
#if ONLY_MSPACES |
#define HAVE_MORECORE 0 |
#else /* ONLY_MSPACES */ |
#define HAVE_MORECORE 1 |
#endif /* ONLY_MSPACES */ |
#endif /* HAVE_MORECORE */ |
#if !HAVE_MORECORE |
#define MORECORE_CONTIGUOUS 0 |
#else /* !HAVE_MORECORE */ |
#ifndef MORECORE |
#define MORECORE sbrk |
#endif /* MORECORE */ |
#ifndef MORECORE_CONTIGUOUS |
#define MORECORE_CONTIGUOUS 1 |
#endif /* MORECORE_CONTIGUOUS */ |
#endif /* HAVE_MORECORE */ |
#ifndef DEFAULT_GRANULARITY |
#if MORECORE_CONTIGUOUS |
#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ |
#else /* MORECORE_CONTIGUOUS */ |
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) |
#endif /* MORECORE_CONTIGUOUS */ |
#endif /* DEFAULT_GRANULARITY */ |
#ifndef DEFAULT_TRIM_THRESHOLD |
#ifndef MORECORE_CANNOT_TRIM |
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) |
#else /* MORECORE_CANNOT_TRIM */ |
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T |
#endif /* MORECORE_CANNOT_TRIM */ |
#endif /* DEFAULT_TRIM_THRESHOLD */ |
#ifndef DEFAULT_MMAP_THRESHOLD |
#if HAVE_MMAP |
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) |
#else /* HAVE_MMAP */ |
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T |
#endif /* HAVE_MMAP */ |
#endif /* DEFAULT_MMAP_THRESHOLD */ |
#ifndef USE_BUILTIN_FFS |
#define USE_BUILTIN_FFS 0 |
#endif /* USE_BUILTIN_FFS */ |
#ifndef USE_DEV_RANDOM |
#define USE_DEV_RANDOM 0 |
#endif /* USE_DEV_RANDOM */ |
#ifndef NO_MALLINFO |
#define NO_MALLINFO 0 |
#endif /* NO_MALLINFO */ |
#ifndef MALLINFO_FIELD_TYPE |
#define MALLINFO_FIELD_TYPE size_t |
#endif /* MALLINFO_FIELD_TYPE */ |
/* |
mallopt tuning options. SVID/XPG defines four standard parameter |
numbers for mallopt, normally defined in malloc.h. None of these |
are used in this malloc, so setting them has no effect. But this |
malloc does support the following options. |
*/ |
#define M_TRIM_THRESHOLD (-1) |
#define M_GRANULARITY (-2) |
#define M_MMAP_THRESHOLD (-3) |
/* ------------------------ Mallinfo declarations ------------------------ */ |
#if !NO_MALLINFO |
#endif /* NO_MALLINFO */ |
#ifdef __cplusplus |
extern "C" { |
#endif /* __cplusplus */ |
#if !ONLY_MSPACES |
/* ------------------- Declarations of public routines ------------------- */ |
#ifndef USE_DL_PREFIX |
#define dlcalloc calloc |
#define dlfree free |
#define dlmalloc malloc |
#define dlmemalign memalign |
#define dlrealloc realloc |
#define dlvalloc valloc |
#define dlpvalloc pvalloc |
#define dlmallinfo mallinfo |
#define dlmallopt mallopt |
#define dlmalloc_trim malloc_trim |
#define dlmalloc_stats malloc_stats |
#define dlmalloc_usable_size malloc_usable_size |
#define dlmalloc_footprint malloc_footprint |
#define dlmalloc_max_footprint malloc_max_footprint |
#define dlindependent_calloc independent_calloc |
#define dlindependent_comalloc independent_comalloc |
#endif /* USE_DL_PREFIX */ |
/* |
malloc(size_t n) |
Returns a pointer to a newly allocated chunk of at least n bytes, or |
null if no space is available, in which case errno is set to ENOMEM |
on ANSI C systems. |
If n is zero, malloc returns a minimum-sized chunk. (The minimum |
size is 16 bytes on most 32bit systems, and 32 bytes on 64bit |
systems.) Note that size_t is an unsigned type, so calls with |
arguments that would be negative if signed are interpreted as |
requests for huge amounts of space, which will often fail. The |
maximum supported value of n differs across systems, but is in all |
cases less than the maximum representable value of a size_t. |
*/ |
void* dlmalloc(size_t); |
/* |
free(void* p) |
Releases the chunk of memory pointed to by p, that had been previously |
allocated using malloc or a related routine such as realloc. |
It has no effect if p is null. If p was not malloced or already |
freed, free(p) will by default cause the current program to abort. |
*/ |
void dlfree(void*); |
/* |
calloc(size_t n_elements, size_t element_size); |
Returns a pointer to n_elements * element_size bytes, with all locations |
set to zero. |
*/ |
void* dlcalloc(size_t, size_t); |
/* |
realloc(void* p, size_t n) |
Returns a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. |
The returned pointer may or may not be the same as p. The algorithm |
prefers extending p in most cases when possible, otherwise it |
employs the equivalent of a malloc-copy-free sequence. |
If p is null, realloc is equivalent to malloc. |
If space is not available, realloc returns null, errno is set (if on |
ANSI) and p is NOT freed. |
if n is for fewer bytes than already held by p, the newly unused |
space is lopped off and freed if possible. realloc with a size |
argument of zero (re)allocates a minimum-sized chunk. |
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is not supported. |
*/ |
void* dlrealloc(void*, size_t); |
/* |
memalign(size_t alignment, size_t n); |
Returns a pointer to a newly allocated chunk of n bytes, aligned |
in accord with the alignment argument. |
The alignment argument should be a power of two. If the argument is |
not a power of two, the nearest greater power is used. |
8-byte alignment is guaranteed by normal malloc calls, so don't |
bother calling memalign with an argument of 8 or less. |
Overreliance on memalign is a sure way to fragment space. |
*/ |
void* dlmemalign(size_t, size_t); |
/* |
valloc(size_t n); |
Equivalent to memalign(pagesize, n), where pagesize is the page |
size of the system. If the pagesize is unknown, 4096 is used. |
*/ |
void* dlvalloc(size_t); |
/* |
mallopt(int parameter_number, int parameter_value) |
Sets tunable parameters The format is to provide a |
(parameter-number, parameter-value) pair. mallopt then sets the |
corresponding parameter to the argument value if it can (i.e., so |
long as the value is meaningful), and returns 1 if successful else |
0. SVID/XPG/ANSI defines four standard param numbers for mallopt, |
normally defined in malloc.h. None of these are used in this malloc,
so setting them has no effect. But this malloc also supports other |
options in mallopt. See below for details. Briefly, supported |
parameters are as follows (listed defaults are for "typical" |
configurations). |
Symbol param # default allowed param values |
M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) |
M_GRANULARITY -2 page size any power of 2 >= page size |
M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) |
*/ |
int dlmallopt(int, int); |
/* |
malloc_footprint(); |
Returns the number of bytes obtained from the system. The total |
number of bytes allocated by malloc, realloc etc., is less than this |
value. Unlike mallinfo, this function returns only a precomputed |
result, so can be called frequently to monitor memory consumption. |
Even if locks are otherwise defined, this function does not use them, |
so results might not be up to date. |
*/ |
size_t dlmalloc_footprint(void); |
/* |
malloc_max_footprint(); |
Returns the maximum number of bytes obtained from the system. This |
value will be greater than current footprint if deallocated space |
has been reclaimed by the system. The peak number of bytes allocated |
by malloc, realloc etc., is less than this value. Unlike mallinfo, |
this function returns only a precomputed result, so can be called |
frequently to monitor memory consumption. Even if locks are |
otherwise defined, this function does not use them, so results might |
not be up to date. |
*/ |
size_t dlmalloc_max_footprint(void); |
#if !NO_MALLINFO |
#endif /* NO_MALLINFO */ |
/* |
independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); |
independent_calloc is similar to calloc, but instead of returning a |
single cleared space, it returns an array of pointers to n_elements |
independent elements that can hold contents of size elem_size, each |
of which starts out cleared, and can be independently freed, |
realloc'ed etc. The elements are guaranteed to be adjacently |
allocated (this is not guaranteed to occur with multiple callocs or |
mallocs), which may also improve cache locality in some |
applications. |
The "chunks" argument is optional (i.e., may be null, which is |
probably the most typical usage). If it is null, the returned array |
is itself dynamically allocated and should also be freed when it is |
no longer needed. Otherwise, the chunks array must be of at least |
n_elements in length. It is filled in with the pointers to the |
chunks. |
In either case, independent_calloc returns this pointer array, or |
null if the allocation failed. If n_elements is zero and "chunks" |
is null, it returns a chunk representing an array with zero elements |
(which should be freed if not wanted). |
Each element must be individually freed when it is no longer |
needed. If you'd like to instead be able to free all at once, you |
should instead use regular calloc and assign pointers into this |
space to represent elements. (In this case though, you cannot |
independently free elements.) |
independent_calloc simplifies and speeds up implementations of many |
kinds of pools. It may also be useful when constructing large data |
structures that initially have a fixed number of fixed-sized nodes, |
but the number is not known at compile time, and some of the nodes |
may later need to be freed. For example: |
struct Node { int item; struct Node* next; }; |
struct Node* build_list() { |
struct Node** pool; |
int n = read_number_of_nodes_needed(); |
if (n <= 0) return 0; |
pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
if (pool == 0) die(); |
// organize into a linked list... |
struct Node* first = pool[0]; |
for (i = 0; i < n-1; ++i) |
pool[i]->next = pool[i+1]; |
free(pool); // Can now free the array (or not, if it is needed later) |
return first; |
} |
*/ |
void** dlindependent_calloc(size_t, size_t, void**); |
/* |
independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); |
independent_comalloc allocates, all at once, a set of n_elements |
chunks with sizes indicated in the "sizes" array. It returns |
an array of pointers to these elements, each of which can be |
independently freed, realloc'ed etc. The elements are guaranteed to |
be adjacently allocated (this is not guaranteed to occur with |
multiple callocs or mallocs), which may also improve cache locality |
in some applications. |
The "chunks" argument is optional (i.e., may be null). If it is null |
the returned array is itself dynamically allocated and should also |
be freed when it is no longer needed. Otherwise, the chunks array |
must be of at least n_elements in length. It is filled in with the |
pointers to the chunks. |
In either case, independent_comalloc returns this pointer array, or |
null if the allocation failed. If n_elements is zero and chunks is |
null, it returns a chunk representing an array with zero elements |
(which should be freed if not wanted). |
Each element must be individually freed when it is no longer |
needed. If you'd like to instead be able to free all at once, you |
should instead use a single regular malloc, and assign pointers at |
particular offsets in the aggregate space. (In this case though, you |
cannot independently free elements.) |
independent_comalloc differs from independent_calloc in that each
element may have a different size, and also that it does not |
automatically clear elements. |
independent_comalloc can be used to speed up allocation in cases |
where several structs or objects must always be allocated at the |
same time. For example: |
struct Head { ... } |
struct Foot { ... } |
void send_message(char* msg) { |
int msglen = strlen(msg); |
size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; |
void* chunks[3]; |
if (independent_comalloc(3, sizes, chunks) == 0) |
die(); |
struct Head* head = (struct Head*)(chunks[0]); |
char* body = (char*)(chunks[1]); |
struct Foot* foot = (struct Foot*)(chunks[2]); |
// ... |
} |
In general though, independent_comalloc is worth using only for |
larger values of n_elements. For small values, you probably won't |
detect enough difference from series of malloc calls to bother. |
Overuse of independent_comalloc can increase overall memory usage, |
since it cannot reuse existing noncontiguous small chunks that |
might be available for some of the elements. |
*/ |
void** dlindependent_comalloc(size_t, size_t*, void**); |
/* |
pvalloc(size_t n); |
Equivalent to valloc(minimum-page-that-holds(n)), that is, |
round up n to nearest pagesize. |
*/ |
void* dlpvalloc(size_t); |
/* |
malloc_trim(size_t pad); |
If possible, gives memory back to the system (via negative arguments |
to sbrk) if there is unused memory at the `high' end of the malloc |
pool or in unused MMAP segments. You can call this after freeing |
large blocks of memory to potentially reduce the system-level memory |
requirements of a program. However, it cannot guarantee to reduce |
memory. Under some allocation patterns, some large free blocks of |
memory will be locked between two used chunks, so they cannot be |
given back to the system. |
The `pad' argument to malloc_trim represents the amount of free |
trailing space to leave untrimmed. If this argument is zero, only |
the minimum amount of memory to maintain internal data structures |
will be left. Non-zero arguments can be supplied to maintain enough |
trailing space to service future expected allocations without having |
to re-obtain memory from the system. |
Malloc_trim returns 1 if it actually released any memory, else 0. |
*/ |
int dlmalloc_trim(size_t); |
/* |
malloc_usable_size(void* p); |
Returns the number of bytes you can actually use in |
an allocated chunk, which may be more than you requested (although |
often not) due to alignment and minimum size constraints. |
You can use this many bytes without worrying about |
overwriting other allocated objects. This is not a particularly great |
programming practice. malloc_usable_size can be more useful in |
debugging and assertions, for example: |
p = malloc(n); |
assert(malloc_usable_size(p) >= 256); |
*/ |
size_t dlmalloc_usable_size(void*); |
/* |
malloc_stats(); |
Prints on stderr the amount of space obtained from the system (both |
via sbrk and mmap), the maximum amount (which may be more than |
current if malloc_trim and/or munmap got called), and the current |
number of bytes allocated via malloc (or realloc, etc) but not yet |
freed. Note that this is the number of bytes allocated, not the |
number requested. It will be larger than the number requested |
because of alignment and bookkeeping overhead. Because it includes |
alignment wastage as being in use, this figure may be greater than |
zero even when no user-level chunks are allocated. |
The reported current and maximum system memory can be inaccurate if |
a program makes other calls to system memory allocation functions |
(normally sbrk) outside of malloc. |
malloc_stats prints only the most commonly interesting statistics. |
More information can be obtained by calling mallinfo. |
*/ |
void dlmalloc_stats(void); |
#endif /* ONLY_MSPACES */ |
#if MSPACES |
#endif /* MSPACES */ |
#ifdef __cplusplus |
}; /* end of extern "C" */ |
#endif /* __cplusplus */ |
/* |
======================================================================== |
To make a fully customizable malloc.h header file, cut everything |
above this line, put into file malloc.h, edit to suit, and #include it |
on the next line, as well as in programs that use this malloc. |
======================================================================== |
*/ |
/* #include "malloc.h" */ |
/*------------------------------ internal #includes ---------------------- */ |
#ifdef WIN32 |
#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ |
#endif /* WIN32 */ |
//#include <stdio.h> /* for printing in malloc_stats */ |
#if 0 |
#ifndef LACKS_ERRNO_H |
#include <errno.h> /* for MALLOC_FAILURE_ACTION */ |
#endif /* LACKS_ERRNO_H */ |
#if FOOTERS |
#include <time.h> /* for magic initialization */ |
#endif /* FOOTERS */ |
#ifndef LACKS_STDLIB_H |
#include <stdlib.h> /* for abort() */ |
#endif /* LACKS_STDLIB_H */ |
#ifdef DEBUG |
#if ABORT_ON_ASSERT_FAILURE |
#define assert(x) if(!(x)) ABORT |
#else /* ABORT_ON_ASSERT_FAILURE */ |
#include <assert.h> |
#endif /* ABORT_ON_ASSERT_FAILURE */ |
#else /* DEBUG */ |
#define assert(x) |
#endif /* DEBUG */ |
#ifndef LACKS_STRING_H |
#include <string.h> /* for memset etc */ |
#endif /* LACKS_STRING_H */ |
#if USE_BUILTIN_FFS |
#ifndef LACKS_STRINGS_H |
#include <strings.h> /* for ffs */ |
#endif /* LACKS_STRINGS_H */ |
#endif /* USE_BUILTIN_FFS */ |
#if HAVE_MMAP |
#ifndef LACKS_SYS_MMAN_H |
#include <sys/mman.h> /* for mmap */ |
#endif /* LACKS_SYS_MMAN_H */ |
#ifndef LACKS_FCNTL_H |
#include <fcntl.h> |
#endif /* LACKS_FCNTL_H */ |
#endif /* HAVE_MMAP */ |
#if HAVE_MORECORE |
#endif /* HAVE_MORECORE */
#endif |
#define assert(x) |
#ifndef WIN32 |
#endif |
/* ------------------- size_t and alignment properties -------------------- */ |
/* The byte and bit size of a size_t */ |
#define SIZE_T_SIZE (sizeof(size_t)) |
#define SIZE_T_BITSIZE (sizeof(size_t) << 3) |
/* Some constants coerced to size_t */ |
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO ((size_t)0) |
#define SIZE_T_ONE ((size_t)1) |
#define SIZE_T_TWO ((size_t)2) |
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) |
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) |
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) |
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) |
/* The bit mask value corresponding to MALLOC_ALIGNMENT */ |
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) |
/* True if address a has acceptable alignment */ |
#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) |
/* the number of bytes to offset an address to align it */ |
#define align_offset(A)\ |
((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ |
((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) |
/* -------------------------- MMAP preliminaries ------------------------- */ |
/* |
If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and |
checks to fail so compiler optimizer can delete code rather than |
using so many "#if"s. |
*/ |
/* MORECORE and MMAP must return MFAIL on failure */ |
#define MFAIL ((void*)(MAX_SIZE_T)) |
#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ |
#if !HAVE_MMAP |
#else /* HAVE_MMAP */ |
#define IS_MMAPPED_BIT (SIZE_T_ONE) |
#define USE_MMAP_BIT (SIZE_T_ONE) |
#ifdef KOLIBRI |
/* Win32 MMAP via VirtualAlloc */ |
static void* win32mmap(size_t size) { |
void* ptr = KernelAlloc(size); |
return (ptr != 0)? ptr: MFAIL; |
} |
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ |
static void* win32direct_mmap(size_t size) { |
void* ptr = KernelAlloc(size); |
return (ptr != 0)? ptr: MFAIL; |
} |
/* This function supports releasing coalesed segments */ |
static int win32munmap(void* ptr, size_t size) { |
KernelFree(ptr); |
return 0; |
} |
#else |
/* MMAP replacement backed by the kernel's fastcall allocator.
   NOTE(review): flags value 3 is opaque here — confirm its meaning
   against the kernel's MemAlloc API.  Returns MFAIL on failure. */
static void* win32mmap(size_t size) {
  void* region = mem_alloc(size, 3);
  if (region == 0)
    return MFAIL;
  return region;
}
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ |
static void* win32direct_mmap(size_t size) { |
void* ptr = mem_alloc(size, 3); |
return (ptr != 0)? ptr: MFAIL; |
} |
/* This function supports releasing coalesed segments */ |
static int win32munmap(void* ptr, size_t size) { |
mem_free(ptr); |
return 0; |
} |
#endif |
#define CALL_MMAP(s) win32mmap(s) |
#define CALL_MUNMAP(a, s) win32munmap((a), (s)) |
#define DIRECT_MMAP(s) win32direct_mmap(s) |
#endif /* HAVE_MMAP */ |
#if HAVE_MMAP && HAVE_MREMAP |
#else /* HAVE_MMAP && HAVE_MREMAP */ |
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL |
#endif /* HAVE_MMAP && HAVE_MREMAP */ |
#if HAVE_MORECORE |
#else /* HAVE_MORECORE */ |
#define CALL_MORECORE(S) MFAIL |
#endif /* HAVE_MORECORE */ |
/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U) |
/* segment bit set in create_mspace_with_base */ |
#define EXTERN_BIT (8U) |
/* --------------------------- Lock preliminaries ------------------------ */ |
#if USE_LOCKS |
#else /* USE_LOCKS */ |
#define USE_LOCK_BIT (0U) |
#define INITIAL_LOCK(l) |
#endif /* USE_LOCKS */ |
#if USE_LOCKS && HAVE_MORECORE |
#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex); |
#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex); |
#else /* USE_LOCKS && HAVE_MORECORE */ |
#define ACQUIRE_MORECORE_LOCK() |
#define RELEASE_MORECORE_LOCK() |
#endif /* USE_LOCKS && HAVE_MORECORE */ |
#if USE_LOCKS |
#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); |
#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); |
#else /* USE_LOCKS */ |
#define ACQUIRE_MAGIC_INIT_LOCK() |
#define RELEASE_MAGIC_INIT_LOCK() |
#endif /* USE_LOCKS */ |
/* ----------------------- Chunk representations ------------------------ */ |
/* |
(The following includes lightly edited explanations by Colin Plumb.) |
The malloc_chunk declaration below is misleading (but accurate and |
necessary). It declares a "view" into memory allowing access to |
necessary fields at known offsets from a given base. |
Chunks of memory are maintained using a `boundary tag' method as |
originally described by Knuth. (See the paper by Paul Wilson |
ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such |
techniques.) Sizes of free chunks are stored both in the front of |
each chunk and at the end. This makes consolidating fragmented |
chunks into bigger chunks fast. The head fields also hold bits |
representing whether chunks are free or in use. |
Here are some pictures to make it clearer. They are "exploded" to |
show that the state of a chunk can be thought of as extending from |
the high 31 bits of the head field of its header through the |
prev_foot and PINUSE_BIT bit of the following chunk header. |
A chunk that's in use looks like: |
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk (if P = 1) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
| Size of this chunk 1| +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | |
+- -+ |
| | |
+- -+ |
| : |
+- size - sizeof(size_t) available payload bytes -+ |
: | |
chunk-> +- -+ |
| | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| |
| Size of next chunk (may or may not be in use) | +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
And if it's free, it looks like this: |
chunk-> +- -+ |
| User payload (must be in use, or we would have merged!) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
| Size of this chunk 0| +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Next pointer | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Prev pointer | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| : |
+- size - sizeof(struct chunk) unused bytes -+ |
: | |
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of this chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| |
| Size of next chunk (must be in use, or we would have merged)| +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| : |
+- User payload -+ |
: | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|0| |
+-+ |
Note that since we always merge adjacent free chunks, the chunks |
adjacent to a free chunk must be in use. |
Given a pointer to a chunk (which can be derived trivially from the |
payload pointer) we can, in O(1) time, find out whether the adjacent |
chunks are free, and if so, unlink them from the lists that they |
are on and merge them with the current chunk. |
Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus at least double-word aligned. |
The P (PINUSE_BIT) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
The very first chunk allocated always has this bit set, preventing |
access to non-existent (or non-owned) memory. If pinuse is set for |
any given chunk, then you CANNOT determine the size of the |
previous chunk, and might even get a memory addressing fault when |
trying to do so. |
The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of |
the chunk size redundantly records whether the current chunk is |
inuse. This redundancy enables usage checks within free and realloc, |
and reduces indirection when freeing and consolidating chunks. |
Each freshly allocated chunk must have both cinuse and pinuse set. |
That is, each allocated chunk borders either a previously allocated |
and still in-use chunk, or the base of its memory arena. This is |
ensured by making all allocations from the the `lowest' part of any |
found chunk. Further, no free chunk physically borders another one, |
so each free chunk is known to be preceded and followed by either |
inuse chunks or the ends of memory. |
Note that the `foot' of the current chunk is actually represented |
as the prev_foot of the NEXT chunk. This makes it easier to |
deal with alignments etc but can be very confusing when trying |
to extend or adapt this code. |
The exceptions to all this are |
1. The special chunk `top' is the top-most available chunk (i.e., |
the one bordering the end of available memory). It is treated |
specially. Top is never included in any bin, is used only if |
no other chunk is available, and is released back to the |
system if it is very large (see M_TRIM_THRESHOLD). In effect, |
the top chunk is treated as larger (and thus less well |
fitting) than any other available chunk. The top chunk |
doesn't update its trailing size field since there is no next |
contiguous chunk that would have to index off it. However, |
space is still allocated for it (TOP_FOOT_SIZE) to enable |
separation or merging when space is extended. |
  2. Chunks allocated via mmap, which have the lowest-order bit
(IS_MMAPPED_BIT) set in their prev_foot fields, and do not set |
PINUSE_BIT in their head fields. Because they are allocated |
one-by-one, each must carry its own prev_foot field, which is |
also used to hold the offset this chunk has within its mmapped |
region, which is needed to preserve alignment. Each mmapped |
chunk is trailed by the first two fields of a fake next-chunk |
for sake of usage checks. |
*/ |
/* A "view" overlaid on chunk memory — see the diagrams above.  Only the
   first two fields exist for in-use chunks; fd/bk are valid only while
   the chunk sits on a free list. */
struct malloc_chunk {
size_t prev_foot; /* Size of previous chunk (if free). */
size_t head; /* Size and inuse bits. */
struct malloc_chunk* fd; /* double links -- used only if free. */
struct malloc_chunk* bk; /* back pointer of the doubly-linked free list */
};
typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
typedef unsigned int bindex_t; /* Described below */
typedef unsigned int binmap_t; /* Described below */
typedef unsigned int flag_t; /* The type of various bit flag sets */
/* ------------------- Chunks sizes and alignments ----------------------- */ |
#define MCHUNK_SIZE (sizeof(mchunk)) |
#if FOOTERS |
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
#else /* FOOTERS */ |
#define CHUNK_OVERHEAD (SIZE_T_SIZE) |
#endif /* FOOTERS */ |
/* MMapped chunks need a second word of overhead ... */ |
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
/* ... and additional padding for fake next-chunk at foot */ |
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) |
/* The smallest size we can malloc is an aligned minimal chunk */ |
#define MIN_CHUNK_SIZE\ |
((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
/* conversion from malloc headers to user pointers, and back */ |
#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) |
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) |
/* chunk associated with aligned address A */ |
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) |
/* Bounds on request (not chunk) sizes. */ |
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) |
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) |
/* pad request bytes into a usable size */ |
#define pad_request(req) \ |
(((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
/* pad request, checking for minimum (but not maximum) */ |
#define request2size(req) \ |
(((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) |
/* ------------------ Operations on head and foot fields ----------------- */ |
/* |
The head field of a chunk is or'ed with PINUSE_BIT when previous |
adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in |
use. If the chunk was obtained with mmap, the prev_foot field has |
IS_MMAPPED_BIT set, otherwise holding the offset of the base of the |
mmapped region to the base of the chunk. |
*/ |
#define PINUSE_BIT (SIZE_T_ONE) |
#define CINUSE_BIT (SIZE_T_TWO) |
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) |
/* Head value for fenceposts */ |
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) |
/* extraction of fields from head words */ |
#define cinuse(p) ((p)->head & CINUSE_BIT) |
#define pinuse(p) ((p)->head & PINUSE_BIT) |
#define chunksize(p) ((p)->head & ~(INUSE_BITS)) |
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) |
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) |
/* Treat space at ptr +/- offset as a chunk */ |
#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) |
/* Ptr to next or previous physical malloc_chunk. */ |
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) |
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) |
/* extract next chunk's pinuse bit */ |
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) |
/* Get/set size at footer */ |
#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) |
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) |
/* Set size, pinuse bit, and foot */ |
#define set_size_and_pinuse_of_free_chunk(p, s)\ |
((p)->head = (s|PINUSE_BIT), set_foot(p, s)) |
/* Set size, pinuse bit, foot, and clear next pinuse */ |
#define set_free_with_pinuse(p, s, n)\ |
(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) |
#define is_mmapped(p)\ |
(!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) |
/* Get the internal overhead associated with chunk p */ |
#define overhead_for(p)\ |
(is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) |
/* Return true if malloced space is not necessarily cleared */ |
#if MMAP_CLEARS |
#define calloc_must_clear(p) (!is_mmapped(p)) |
#else /* MMAP_CLEARS */ |
#define calloc_must_clear(p) (1) |
#endif /* MMAP_CLEARS */ |
/* ---------------------- Overlaid data structures ----------------------- */ |
/* |
When chunks are not in use, they are treated as nodes of either |
lists or trees. |
"Small" chunks are stored in circular doubly-linked lists, and look |
like this: |
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
Larger chunks are kept in a form of bitwise digital trees (aka |
tries) keyed on chunksizes. Because malloc_tree_chunks are only for |
free chunks greater than 256 bytes, their size doesn't impose any |
constraints on user chunk sizes. Each node looks like: |
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk of same size | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk of same size | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Pointer to left child (child[0]) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Pointer to right child (child[1]) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Pointer to parent | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| bin index of this chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
Each tree holding treenodes is a tree of unique chunk sizes. Chunks |
of the same size are arranged in a circularly-linked list, with only |
the oldest chunk (the next to be used, in our FIFO ordering) |
actually in the tree. (Tree members are distinguished by a non-null |
parent pointer.) If a chunk with the same size as an existing node
is inserted, it is linked off the existing node using pointers that |
work in the same way as fd/bk pointers of small chunks. |
Each tree contains a power of 2 sized range of chunk sizes (the |
smallest is 0x100 <= x < 0x180), which is divided in half at each
tree level, with the chunks in the smaller half of the range (0x100 |
<= x < 0x140 for the top node) in the left subtree and the larger
half (0x140 <= x < 0x180) in the right subtree. This is, of course, |
done by inspecting individual bits. |
Using these rules, each node's left subtree contains all smaller |
sizes than its right subtree. However, the node at the root of each |
subtree has no particular ordering relationship to either. (The |
dividing line between the subtree sizes is based on trie relation.) |
If we remove the last chunk of a given size from the interior of the |
tree, we need to replace it with a leaf node. The tree ordering |
rules permit a node to be replaced by any leaf below it. |
The smallest chunk in a tree (a common operation in a best-fit |
allocator) can be found by walking a path to the leftmost leaf in |
the tree. Unlike a usual binary tree, where we follow left child |
pointers until we reach a null, here we follow the right child |
pointer any time the left one is null, until we reach a leaf with |
both child pointers null. The smallest chunk in the tree will be |
somewhere along that path. |
The worst case number of steps to add, find, or remove a node is |
bounded by the number of bits differentiating chunks within |
bins. Under current bin calculations, this ranges from 6 up to 21 |
(for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case |
is of course much better. |
*/ |
/* Overlay for free chunks >= MIN_LARGE_SIZE, kept in the bitwise
   digital trees (tries) keyed on chunk size described above.  Chunks
   of equal size hang off a tree node via the fd/bk ring; only the
   oldest is actually linked into the tree. */
struct malloc_tree_chunk {
/* The first four fields must be compatible with malloc_chunk */
size_t prev_foot;                    /* size of previous chunk, when free */
size_t head;                         /* chunk size plus PINUSE/CINUSE bits */
struct malloc_tree_chunk* fd;        /* next chunk of same size (circular) */
struct malloc_tree_chunk* bk;        /* previous chunk of same size */
struct malloc_tree_chunk* child[2];  /* left/right trie children */
struct malloc_tree_chunk* parent;    /* parent in tree; 0 for same-size duplicates */
bindex_t index;                      /* treebin index of this chunk */
};
typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
/* A little helper macro for trees */ |
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) |
/* ----------------------------- Segments -------------------------------- */ |
/* |
Each malloc space may include non-contiguous segments, held in a |
list headed by an embedded malloc_segment record representing the |
top-most space. Segments also include flags holding properties of |
the space. Large chunks that are directly allocated by mmap are not |
included in this list. They are instead independently created and |
destroyed without otherwise keeping track of them. |
Segment management mainly comes into play for spaces allocated by |
MMAP. Any call to MMAP might or might not return memory that is |
adjacent to an existing segment. MORECORE normally contiguously |
extends the current space, so this space is almost always adjacent, |
which is simpler and faster to deal with. (This is why MORECORE is |
used preferentially to MMAP when both are available -- see |
sys_alloc.) When allocating using MMAP, we don't use any of the |
hinting mechanisms (inconsistently) supported in various |
implementations of unix mmap, or distinguish reserving from |
committing memory. Instead, we just ask for space, and exploit |
contiguity when we get it. It is probably possible to do |
better than this on some systems, but no general scheme seems |
to be significantly better. |
Management entails a simpler variant of the consolidation scheme |
used for chunks to reduce fragmentation -- new adjacent memory is |
normally prepended or appended to an existing segment. However, |
there are limitations compared to chunk consolidation that mostly |
reflect the fact that segment processing is relatively infrequent |
(occurring only when getting memory from system) and that we |
don't expect to have huge numbers of segments: |
* Segments are not indexed, so traversal requires linear scans. (It |
would be possible to index these, but is not worth the extra |
overhead and complexity for most programs on most platforms.) |
* New segments are only appended to old ones when holding top-most |
memory; if they cannot be prepended to others, they are held in |
different segments. |
Except for the top-most segment of an mstate, each segment record |
is kept at the tail of its segment. Segments are added by pushing |
segment records onto the list headed by &mstate.seg for the |
containing mstate. |
Segment flags control allocation/merge/deallocation policies: |
* If EXTERN_BIT set, then we did not allocate this segment, |
and so should not try to deallocate or merge with others. |
(This currently holds only for the initial segment passed |
into create_mspace_with_base.) |
* If IS_MMAPPED_BIT set, the segment may be merged with |
other surrounding mmapped segments and trimmed/de-allocated |
using munmap. |
* If neither bit is set, then the segment was obtained using |
MORECORE so can be merged with surrounding MORECORE'd segments |
and deallocated/trimmed using MORECORE with negative arguments. |
*/ |
/* Record describing one contiguous region of memory managed by an
   mstate; segments form a singly linked list headed at mstate.seg
   (see the Segments discussion above). */
struct malloc_segment {
char* base; /* base address */
size_t size; /* allocated size */
struct malloc_segment* next; /* ptr to next segment */
flag_t sflags; /* mmap and extern flag */
};
/* Test the IS_MMAPPED_BIT / EXTERN_BIT policy flags of a segment */
#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
typedef struct malloc_segment msegment;
typedef struct malloc_segment* msegmentptr;
/* ---------------------------- malloc_state ----------------------------- */ |
/* |
A malloc_state holds all of the bookkeeping for a space. |
The main fields are: |
Top |
The topmost chunk of the currently active segment. Its size is |
cached in topsize. The actual size of topmost space is |
topsize+TOP_FOOT_SIZE, which includes space reserved for adding |
fenceposts and segment records if necessary when getting more |
space from the system. The size at which to autotrim top is |
cached from mparams in trim_check, except that it is disabled if |
an autotrim fails. |
Designated victim (dv) |
This is the preferred chunk for servicing small requests that |
don't have exact fits. It is normally the chunk split off most |
recently to service another small request. Its size is cached in |
dvsize. The link fields of this chunk are not maintained since it |
is not kept in a bin. |
SmallBins |
An array of bin headers for free chunks. These bins hold chunks |
with sizes less than MIN_LARGE_SIZE bytes. Each bin contains |
chunks of all the same size, spaced 8 bytes apart. To simplify |
use in double-linked lists, each bin header acts as a malloc_chunk |
pointing to the real first node, if it exists (else pointing to |
itself). This avoids special-casing for headers. But to avoid |
waste, we allocate only the fd/bk pointers of bins, and then use |
repositioning tricks to treat these as the fields of a chunk. |
TreeBins |
Treebins are pointers to the roots of trees holding a range of |
sizes. There are 2 equally spaced treebins for each power of two |
from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything |
larger. |
Bin maps |
There is one bit map for small bins ("smallmap") and one for |
treebins ("treemap"). Each bin sets its bit when non-empty, and
clears the bit when empty. Bit operations are then used to avoid |
bin-by-bin searching -- nearly all "search" is done without ever |
looking at bins that won't be selected. The bit maps |
conservatively use 32 bits per map word, even if on 64bit system. |
For a good description of some of the bit-based techniques used |
here, see Henry S. Warren Jr's book "Hacker's Delight" (and |
supplement at http://hackersdelight.org/). Many of these are |
intended to reduce the branchiness of paths through malloc etc, as |
well as to reduce the number of memory locations read or written. |
Segments |
A list of segments headed by an embedded malloc_segment record |
representing the initial space. |
Address check support |
The least_addr field is the least address ever obtained from |
MORECORE or MMAP. Attempted frees and reallocs of any address less |
than this are trapped (unless INSECURE is defined). |
Magic tag |
A cross-check field that should always hold same value as mparams.magic. |
Flags |
Bits recording whether to use MMAP, locks, or contiguous MORECORE |
Statistics |
Each space keeps track of current and maximum system memory |
obtained via MORECORE or MMAP. |
Locking |
If USE_LOCKS is defined, the "mutex" lock is acquired and released |
around every public call using this mspace. |
*/ |
/* Bin types, widths and sizes */ |
#define NSMALLBINS (32U) |
#define NTREEBINS (32U) |
#define SMALLBIN_SHIFT (3U) |
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) |
#define TREEBIN_SHIFT (8U) |
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) |
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) |
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) |
/* All bookkeeping for one malloc space (see field-by-field
   discussion in the comment above). */
struct malloc_state {
binmap_t smallmap;     /* one bit per non-empty smallbin */
binmap_t treemap;      /* one bit per non-empty treebin */
size_t dvsize;         /* size of the designated victim chunk */
size_t topsize;        /* cached size of the top chunk */
char* least_addr;      /* least address ever obtained from the system */
mchunkptr dv;          /* designated victim: preferred chunk for small fits */
mchunkptr top;         /* topmost chunk of the active segment */
size_t trim_check;     /* top size above which autotrim is attempted */
size_t magic;          /* cross-check; should equal mparams.magic */
mchunkptr smallbins[(NSMALLBINS+1)*2]; /* only fd/bk per bin; repositioned as chunks */
tbinptr treebins[NTREEBINS];           /* roots of the size-range trees */
size_t footprint;      /* current system memory obtained */
size_t max_footprint;  /* high-water mark of footprint */
flag_t mflags;         /* USE_MMAP / USE_LOCK / NONCONTIGUOUS bits */
#if USE_LOCKS
MLOCK_T mutex; /* locate lock among fields that rarely change */
#endif /* USE_LOCKS */
msegment seg;          /* embedded record for the initial/topmost segment */
};
typedef struct malloc_state* mstate;
/* ------------- Global malloc_state and malloc_params ------------------- */ |
/* |
malloc_params holds global properties, including those that can be |
dynamically set using mallopt. There is a single instance, mparams, |
initialized in init_mparams. |
*/ |
/* Global tunables; single instance mparams, initialized in
   init_mparams and adjusted via change_mparam (mallopt). */
struct malloc_params {
size_t magic;          /* tag copied into each mstate for cross-checking */
size_t page_size;      /* system page size (set to a fixed 4096 here) */
size_t granularity;    /* unit size for system allocation requests */
size_t mmap_threshold; /* request size at/above which mmap is preferred */
size_t trim_threshold; /* excess top size that may trigger trimming */
flag_t default_mflags; /* initial mflags for newly created mstates */
};
static struct malloc_params mparams;
/* The global malloc_state used for all non-"mspace" calls */ |
static struct malloc_state _gm_; |
#define gm (&_gm_) |
#define is_global(M) ((M) == &_gm_) |
#define is_initialized(M) ((M)->top != 0) |
/* -------------------------- system alloc setup ------------------------- */ |
/* Operations on mflags */ |
#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) |
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) |
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) |
#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) |
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) |
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) |
#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) |
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) |
#define set_lock(M,L)\ |
((M)->mflags = (L)?\ |
((M)->mflags | USE_LOCK_BIT) :\ |
((M)->mflags & ~USE_LOCK_BIT)) |
/* page-align a size */ |
#define page_align(S)\ |
(((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) |
/* granularity-align a size */ |
#define granularity_align(S)\ |
(((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) |
#define is_page_aligned(S)\ |
(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) |
#define is_granularity_aligned(S)\ |
(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) |
/* True if segment S holds address A */ |
#define segment_holds(S, A)\ |
((char*)(A) >= S->base && (char*)(A) < S->base + S->size) |
/* Return segment holding given address */ |
/* Return the segment of m whose [base, base+size) range contains addr,
   or 0 if no segment holds it.  Linear scan of the segment list headed
   at &m->seg (segments are not indexed; see notes above). */
static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr seg;
  for (seg = &m->seg; seg != 0; seg = seg->next) {
    if (addr >= seg->base && addr < seg->base + seg->size)
      return seg;
  }
  return 0;
}
/* Return true if segment contains a segment link */ |
/* Return nonzero if the memory spanned by segment ss contains one of
   m's segment records (i.e. a link of the segment list lives inside
   ss), else 0.  Linear scan, like segment_holding. */
static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp;
  for (sp = &m->seg; sp != 0; sp = sp->next) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
  }
  return 0;
}
#ifndef MORECORE_CANNOT_TRIM |
#define should_trim(M,s) ((s) > (M)->trim_check) |
#else /* MORECORE_CANNOT_TRIM */ |
#define should_trim(M,s) (0) |
#endif /* MORECORE_CANNOT_TRIM */ |
/* |
TOP_FOOT_SIZE is padding at the end of a segment, including space |
that may be needed to place segment records and fenceposts when new |
noncontiguous segments are added. |
*/ |
#define TOP_FOOT_SIZE\ |
(align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) |
/* ------------------------------- Hooks -------------------------------- */ |
/* |
PREACTION should be defined to return 0 on success, and nonzero on |
failure. If you are not using locking, you can redefine these to do |
anything you like. |
*/ |
#if USE_LOCKS |
/* Ensure locks are initialized */ |
#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) |
#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) |
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } |
#else /* USE_LOCKS */ |
#ifndef PREACTION |
#define PREACTION(M) (0) |
#endif /* PREACTION */ |
#ifndef POSTACTION |
#define POSTACTION(M) |
#endif /* POSTACTION */ |
#endif /* USE_LOCKS */ |
/* |
CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. |
USAGE_ERROR_ACTION is triggered on detected bad frees and |
reallocs. The argument p is an address that might have triggered the |
fault. It is ignored by the two predefined actions, but might be |
useful in custom actions that try to help diagnose errors. |
*/ |
#if PROCEED_ON_ERROR |
/* A count of the number of corruption errors causing resets */ |
int malloc_corruption_error_count; |
/* default corruption action */ |
static void reset_on_error(mstate m); |
#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) |
#define USAGE_ERROR_ACTION(m, p) |
#else /* PROCEED_ON_ERROR */ |
#ifndef CORRUPTION_ERROR_ACTION |
#define CORRUPTION_ERROR_ACTION(m) ABORT |
#endif /* CORRUPTION_ERROR_ACTION */ |
#ifndef USAGE_ERROR_ACTION |
#define USAGE_ERROR_ACTION(m,p) ABORT |
#endif /* USAGE_ERROR_ACTION */ |
#endif /* PROCEED_ON_ERROR */ |
/* -------------------------- Debugging setup ---------------------------- */ |
#if ! DEBUG |
#define check_free_chunk(M,P) |
#define check_inuse_chunk(M,P) |
#define check_malloced_chunk(M,P,N) |
#define check_mmapped_chunk(M,P) |
#define check_malloc_state(M) |
#define check_top_chunk(M,P) |
#else /* DEBUG */ |
#define check_free_chunk(M,P) do_check_free_chunk(M,P) |
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) |
#define check_top_chunk(M,P) do_check_top_chunk(M,P) |
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) |
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) |
#define check_malloc_state(M) do_check_malloc_state(M) |
static void do_check_any_chunk(mstate m, mchunkptr p); |
static void do_check_top_chunk(mstate m, mchunkptr p); |
static void do_check_mmapped_chunk(mstate m, mchunkptr p); |
static void do_check_inuse_chunk(mstate m, mchunkptr p); |
static void do_check_free_chunk(mstate m, mchunkptr p); |
static void do_check_malloced_chunk(mstate m, void* mem, size_t s); |
static void do_check_tree(mstate m, tchunkptr t); |
static void do_check_treebin(mstate m, bindex_t i); |
static void do_check_smallbin(mstate m, bindex_t i); |
static void do_check_malloc_state(mstate m); |
static int bin_find(mstate m, mchunkptr x); |
static size_t traverse_and_check(mstate m); |
#endif /* DEBUG */ |
/* ---------------------------- Indexing Bins ---------------------------- */ |
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) |
#define small_index(s) ((s) >> SMALLBIN_SHIFT) |
#define small_index2size(i) ((i) << SMALLBIN_SHIFT) |
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) |
/* addressing by index. See above about smallbin repositioning */ |
#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) |
#define treebin_at(M,i) (&((M)->treebins[i])) |
/* assign tree index for size S to variable I */ |
#if defined(__GNUC__) && defined(i386) |
#define compute_tree_index(S, I)\ |
{\ |
size_t X = S >> TREEBIN_SHIFT;\ |
if (X == 0)\ |
I = 0;\ |
else if (X > 0xFFFF)\ |
I = NTREEBINS-1;\ |
else {\ |
unsigned int K;\ |
__asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ |
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
}\ |
} |
#else /* GNUC */ |
#define compute_tree_index(S, I)\ |
{\ |
size_t X = S >> TREEBIN_SHIFT;\ |
if (X == 0)\ |
I = 0;\ |
else if (X > 0xFFFF)\ |
I = NTREEBINS-1;\ |
else {\ |
unsigned int Y = (unsigned int)X;\ |
unsigned int N = ((Y - 0x100) >> 16) & 8;\ |
unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ |
N += K;\ |
N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ |
K = 14 - N + ((Y <<= K) >> 15);\ |
I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ |
}\ |
} |
#endif /* GNUC */ |
/* Bit representing maximum resolved size in a treebin at i */ |
#define bit_for_tree_index(i) \ |
(i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) |
/* Shift placing maximum resolved bit in a treebin at i as sign bit */ |
#define leftshift_for_tree_index(i) \ |
((i == NTREEBINS-1)? 0 : \ |
((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) |
/* The size of the smallest chunk held in bin with index i */ |
#define minsize_for_tree_index(i) \ |
((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ |
(((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) |
/* ------------------------ Operations on bin maps ----------------------- */ |
/* bit corresponding to given index */ |
#define idx2bit(i) ((binmap_t)(1) << (i)) |
/* Mark/Clear bits with given index */ |
#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) |
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) |
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) |
#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) |
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) |
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) |
/* index corresponding to given bit */ |
#if defined(__GNUC__) && defined(i386) |
#define compute_bit2idx(X, I)\ |
{\ |
unsigned int J;\ |
__asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ |
I = (bindex_t)J;\ |
} |
#else /* GNUC */ |
#if USE_BUILTIN_FFS |
#define compute_bit2idx(X, I) I = ffs(X)-1 |
#else /* USE_BUILTIN_FFS */ |
#define compute_bit2idx(X, I)\ |
{\ |
unsigned int Y = X - 1;\ |
unsigned int K = Y >> (16-4) & 16;\ |
unsigned int N = K; Y >>= K;\ |
N += K = Y >> (8-3) & 8; Y >>= K;\ |
N += K = Y >> (4-2) & 4; Y >>= K;\ |
N += K = Y >> (2-1) & 2; Y >>= K;\ |
N += K = Y >> (1-0) & 1; Y >>= K;\ |
I = (bindex_t)(N + Y);\ |
} |
#endif /* USE_BUILTIN_FFS */ |
#endif /* GNUC */ |
/* isolate the least set bit of a bitmap */ |
#define least_bit(x) ((x) & -(x)) |
/* mask with all bits to left of least bit of x on */ |
#define left_bits(x) ((x<<1) | -(x<<1)) |
/* mask with all bits to left of or equal to least bit of x on */ |
#define same_or_left_bits(x) ((x) | -(x)) |
/* ----------------------- Runtime Check Support ------------------------- */ |
/* |
For security, the main invariant is that malloc/free/etc never |
writes to a static address other than malloc_state, unless static |
malloc_state itself has been corrupted, which cannot occur via |
malloc (because of these checks). In essence this means that we |
believe all pointers, sizes, maps etc held in malloc_state, but |
check all of those linked or offsetted from other embedded data |
structures. These checks are interspersed with main code in a way |
that tends to minimize their run-time cost. |
When FOOTERS is defined, in addition to range checking, we also |
verify footer fields of inuse chunks, which can be used to guarantee
that the mstate controlling malloc/free is intact. This is a |
streamlined version of the approach described by William Robertson |
et al in "Run-time Detection of Heap-based Overflows" LISA'03 |
http://www.usenix.org/events/lisa03/tech/robertson.html The footer |
of an inuse chunk holds the xor of its mstate and a random seed, |
that is checked upon calls to free() and realloc(). This is |
(probabilistically) unguessable from outside the program, but can be
computed by any code successfully malloc'ing any chunk, so does not |
itself provide protection against code that has already broken |
security through some other means. Unlike Robertson et al, we |
always dynamically check addresses of all offset chunks (previous, |
next, etc). This turns out to be cheaper than relying on hashes. |
*/ |
#if !INSECURE |
/* Check if address a is at least as high as any from MORECORE or MMAP */ |
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) |
/* Check if address of next chunk n is higher than base chunk p */ |
#define ok_next(p, n) ((char*)(p) < (char*)(n)) |
/* Check if p has its cinuse bit on */ |
#define ok_cinuse(p) cinuse(p) |
/* Check if p has its pinuse bit on */ |
#define ok_pinuse(p) pinuse(p) |
#else /* !INSECURE */ |
#define ok_address(M, a) (1) |
#define ok_next(b, n) (1) |
#define ok_cinuse(p) (1) |
#define ok_pinuse(p) (1) |
#endif /* !INSECURE */ |
#if (FOOTERS && !INSECURE) |
/* Check if (alleged) mstate m has expected magic field */ |
#define ok_magic(M) ((M)->magic == mparams.magic) |
#else /* (FOOTERS && !INSECURE) */ |
#define ok_magic(M) (1) |
#endif /* (FOOTERS && !INSECURE) */ |
/* In gcc, use __builtin_expect to minimize impact of checks */ |
#if !INSECURE |
#if defined(__GNUC__) && __GNUC__ >= 3 |
#define RTCHECK(e) __builtin_expect(e, 1) |
#else /* GNUC */ |
#define RTCHECK(e) (e) |
#endif /* GNUC */ |
#else /* !INSECURE */ |
#define RTCHECK(e) (1) |
#endif /* !INSECURE */ |
/* macros to set up inuse chunks with or without footers */ |
#if !FOOTERS |
#define mark_inuse_foot(M,p,s) |
/* Set cinuse bit and pinuse bit of next chunk */ |
#define set_inuse(M,p,s)\ |
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) |
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ |
#define set_inuse_and_pinuse(M,p,s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) |
/* Set size, cinuse and pinuse bit of this chunk */ |
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) |
#else /* FOOTERS */ |
/* Set foot of inuse chunk to be xor of mstate and seed */ |
#define mark_inuse_foot(M,p,s)\ |
(((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) |
#define get_mstate_for(p)\ |
((mstate)(((mchunkptr)((char*)(p) +\ |
(chunksize(p))))->prev_foot ^ mparams.magic)) |
#define set_inuse(M,p,s)\ |
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ |
mark_inuse_foot(M,p,s)) |
#define set_inuse_and_pinuse(M,p,s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ |
mark_inuse_foot(M,p,s)) |
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
mark_inuse_foot(M, p, s)) |
#endif /* !FOOTERS */ |
/* ---------------------------- setting mparams -------------------------- */ |
/* Initialize mparams */ |
/* One-time initialization of the global mparams: sets trim/mmap
   thresholds, default mstate flags, the magic cross-check tag, and
   fixed page/granularity sizes, then sanity-checks the build
   configuration.  Idempotent: a nonzero mparams.page_size means setup
   already ran and the call is a no-op.  Always returns 0. */
static int init_mparams(void) {
  if (mparams.page_size == 0) {
    size_t s;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */
#if (FOOTERS && !INSECURE)
    {
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];
      /* Try to use /dev/urandom, else fall back on using time */
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        s = *((size_t *) buf);
        close(fd);
      }
      else
#endif /* USE_DEV_RANDOM */
      s = (size_t)(time(0) ^ (size_t)0x55555555U);
      s |= (size_t)8U;  /* ensure nonzero */
      s &= ~(size_t)7U; /* improve chances of fault for bad values */
    }
#else /* (FOOTERS && !INSECURE) */
    /* Footers disabled: the magic tag is not security-relevant, so a
       fixed recognizable value suffices. */
    s = (size_t)0x58585858U;
#endif /* (FOOTERS && !INSECURE) */
    /* Publish the magic and configure the global mstate exactly once,
       even if several threads race into this function. */
    ACQUIRE_MAGIC_INIT_LOCK();
    if (mparams.magic == 0) {
      mparams.magic = s;
      /* Set up lock for main malloc area */
      INITIAL_LOCK(&gm->mutex);
      gm->mflags = mparams.default_mflags;
    }
    RELEASE_MAGIC_INIT_LOCK();
    /* Kernel build uses fixed sizes instead of querying the OS.
       NOTE(review): assumes 4K pages / 16K allocation granularity --
       confirm against the target platform. */
    mparams.page_size = 4096;
    mparams.granularity = 16384;
    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
        (sizeof(int) < 4) ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
        ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
      ABORT;
  }
  return 0;
}
/* support for mallopt */ |
static int change_mparam(int param_number, int value) { |
size_t val = (size_t)value; |
init_mparams(); |
switch(param_number) { |
case M_TRIM_THRESHOLD: |
mparams.trim_threshold = val; |
return 1; |
case M_GRANULARITY: |
if (val >= mparams.page_size && ((val & (val-1)) == 0)) { |
mparams.granularity = val; |
return 1; |
} |
else |
return 0; |
case M_MMAP_THRESHOLD: |
mparams.mmap_threshold = val; |
return 1; |
default: |
return 0; |
} |
} |
#if DEBUG |
#endif /* DEBUG */ |
/* ----------------------------- statistics ------------------------------ */ |
#if !NO_MALLINFO |
#endif /* !NO_MALLINFO */ |
/* ----------------------- Operations on smallbins ----------------------- */ |
/* |
Various forms of linking and unlinking are defined as macros. Even |
the ones for trees, which are very long but have very short typical |
paths. This is ugly but reduces reliance on inlining support of |
compilers. |
*/ |
/* Link a free chunk into a smallbin */ |
#define insert_small_chunk(M, P, S) {\ |
bindex_t I = small_index(S);\ |
mchunkptr B = smallbin_at(M, I);\ |
mchunkptr F = B;\ |
assert(S >= MIN_CHUNK_SIZE);\ |
if (!smallmap_is_marked(M, I))\ |
mark_smallmap(M, I);\ |
else if (RTCHECK(ok_address(M, B->fd)))\ |
F = B->fd;\ |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
}\ |
B->fd = P;\ |
F->bk = P;\ |
P->fd = F;\ |
P->bk = B;\ |
} |
/* Unlink a chunk from a smallbin */ |
#define unlink_small_chunk(M, P, S) {\ |
mchunkptr F = P->fd;\ |
mchunkptr B = P->bk;\ |
bindex_t I = small_index(S);\ |
assert(P != B);\ |
assert(P != F);\ |
assert(chunksize(P) == small_index2size(I));\ |
if (F == B)\ |
clear_smallmap(M, I);\ |
else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ |
(B == smallbin_at(M,I) || ok_address(M, B)))) {\ |
F->bk = B;\ |
B->fd = F;\ |
}\ |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
}\ |
} |
/* Unlink the first chunk from a smallbin */ |
#define unlink_first_small_chunk(M, B, P, I) {\ |
mchunkptr F = P->fd;\ |
assert(P != B);\ |
assert(P != F);\ |
assert(chunksize(P) == small_index2size(I));\ |
if (B == F)\ |
clear_smallmap(M, I);\ |
else if (RTCHECK(ok_address(M, F))) {\ |
B->fd = F;\ |
F->bk = B;\ |
}\ |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
}\ |
} |
/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
/*
  replace_dv(M, P, S): make chunk P (size S) the new "designated
  victim".  The previous dv, if any, is re-inserted into its smallbin
  first (caller guarantees the old dvsize is small, as asserted).
*/
#define replace_dv(M, P, S) {\
size_t DVS = M->dvsize;\
if (DVS != 0) {\
mchunkptr DV = M->dv;\
assert(is_small(DVS));\
insert_small_chunk(M, DV, DVS);\
}\
M->dvsize = S;\
M->dv = P;\
}
/* ------------------------- Operations on trees ------------------------- */
/* Insert chunk into tree */
/*
  insert_large_chunk(M, X, S): insert tree chunk X (size S) into the
  treebin selected by compute_tree_index.  An empty bin simply takes X
  as its root (marking the treemap bit).  Otherwise the tree is walked
  by successive size bits (K shifted left each level): if a node of
  exactly size S is found, X is appended to that node's circular fd/bk
  same-size ring with a null parent; otherwise X becomes a new leaf
  child at the first empty, address-checked child slot.
*/
#define insert_large_chunk(M, X, S) {\
tbinptr* H;\
bindex_t I;\
compute_tree_index(S, I);\
H = treebin_at(M, I);\
X->index = I;\
X->child[0] = X->child[1] = 0;\
if (!treemap_is_marked(M, I)) {\
mark_treemap(M, I);\
*H = X;\
X->parent = (tchunkptr)H;\
X->fd = X->bk = X;\
}\
else {\
tchunkptr T = *H;\
size_t K = S << leftshift_for_tree_index(I);\
for (;;) {\
if (chunksize(T) != S) {\
tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
K <<= 1;\
if (*C != 0)\
T = *C;\
else if (RTCHECK(ok_address(M, C))) {\
*C = X;\
X->parent = T;\
X->fd = X->bk = X;\
break;\
}\
else {\
CORRUPTION_ERROR_ACTION(M);\
break;\
}\
}\
else {\
tchunkptr F = T->fd;\
if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
T->fd = F->bk = X;\
X->fd = F;\
X->bk = T;\
X->parent = 0;\
break;\
}\
else {\
CORRUPTION_ERROR_ACTION(M);\
break;\
}\
}\
}\
}\
}
/*
Unlink steps:
1. If x is a chained node, unlink it from its same-sized fd/bk links
and choose its bk node as its replacement.
2. If x was the last node of its size, but not a leaf node, it must
be replaced with a leaf node (not merely one with an open left or
right), to make sure that lefts and rights of descendants
correspond properly to bit masks. We use the rightmost descendant
of x. We could use any other leaf, but this is easy to locate and
tends to counteract removal of leftmost nodes elsewhere, and so keeps
paths shorter than minimally guaranteed. This doesn't loop much
because on average a node in a tree is near the bottom.
3. If x is the base of a chain (i.e., has parent links) relink
x's parent and children to x's replacement (or null if none).
*/
/*
  unlink_large_chunk(M, X): remove tree chunk X from its treebin,
  following the three unlink steps documented above.  R becomes X's
  replacement (its bk ring-mate, or its rightmost descendant, or null),
  and is then wired to X's parent and children.  XP == 0 means X was a
  ring member with no parent links, so no tree surgery is needed.
  All pointers are address-checked (RTCHECK) before being dereferenced
  for writes; failures invoke CORRUPTION_ERROR_ACTION.
*/
#define unlink_large_chunk(M, X) {\
tchunkptr XP = X->parent;\
tchunkptr R;\
if (X->bk != X) {\
tchunkptr F = X->fd;\
R = X->bk;\
if (RTCHECK(ok_address(M, F))) {\
F->bk = R;\
R->fd = F;\
}\
else {\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
else {\
tchunkptr* RP;\
if (((R = *(RP = &(X->child[1]))) != 0) ||\
((R = *(RP = &(X->child[0]))) != 0) {\
tchunkptr* CP;\
while ((*(CP = &(R->child[1])) != 0) ||\
(*(CP = &(R->child[0])) != 0)) {\
R = *(RP = CP);\
}\
if (RTCHECK(ok_address(M, RP)))\
*RP = 0;\
else {\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
}\
if (XP != 0) {\
tbinptr* H = treebin_at(M, X->index);\
if (X == *H) {\
if ((*H = R) == 0) \
clear_treemap(M, X->index);\
}\
else if (RTCHECK(ok_address(M, XP))) {\
if (XP->child[0] == X) \
XP->child[0] = R;\
else \
XP->child[1] = R;\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
if (R != 0) {\
if (RTCHECK(ok_address(M, R))) {\
tchunkptr C0, C1;\
R->parent = XP;\
if ((C0 = X->child[0]) != 0) {\
if (RTCHECK(ok_address(M, C0))) {\
R->child[0] = C0;\
C0->parent = R;\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
}\
if ((C1 = X->child[1]) != 0) {\
if (RTCHECK(ok_address(M, C1))) {\
R->child[1] = C1;\
C1->parent = R;\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
}
/* Relays to large vs small bin operations */
/* Dispatch insertion on chunk size: smallbin list vs. treebin. */
#define insert_chunk(M, P, S)\
if (is_small(S)) insert_small_chunk(M, P, S)\
else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
/* Dispatch removal on chunk size: smallbin list vs. treebin. */
#define unlink_chunk(M, P, S)\
if (is_small(S)) unlink_small_chunk(M, P, S)\
else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
/* Relays to internal calls to malloc/free from realloc, memalign etc */
/*
  internal_malloc/internal_free route back-end allocations to the right
  entry point depending on build configuration: mspace-only builds call
  the mspace functions, MSPACES builds pick dlmalloc/dlfree when the
  state is the global mstate gm, and plain builds call dlmalloc/dlfree
  directly.  Note internal_free in the first two configurations expands
  to a statement (trailing semicolon / if-else), so callers invoke it
  as a statement, not inside an expression.
*/
#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
#define internal_malloc(m, b)\
(m == gm)? dlmalloc(b) : mspace_malloc(m, b)
#define internal_free(m, mem)\
if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */
/* ----------------------- Direct-mmapping chunks ----------------------- */ |
/* |
Directly mmapped chunks are set up with an offset to the start of |
the mmapped region stored in the prev_foot field of the chunk. This |
allows reconstruction of the required argument to MUNMAP when freed, |
and also allows adjustment of the returned chunk to meet alignment |
requirements (especially in memalign). There is also enough space |
allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain |
the PINUSE bit so frees can be checked. |
*/ |
/* Malloc using mmap */
/*
  Directly mmap a chunk big enough for request nb.  The mmap offset is
  stored in prev_foot (tagged IS_MMAPPED_BIT) so dlfree can recover the
  original mapping base, and two fencepost headers are written past the
  chunk (see the comment block above).  Returns the user pointer, or 0
  on mapping failure or size wrap-around.
*/
static void* mmap_alloc(mstate m, size_t nb) {
size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
if (mmsize > nb) { /* Check for wrap around 0 */
char* mm = (char*)(DIRECT_MMAP(mmsize));
if (mm != CMFAIL) {
size_t offset = align_offset(chunk2mem(mm));
size_t psize = mmsize - offset - MMAP_FOOT_PAD;
mchunkptr p = (mchunkptr)(mm + offset);
p->prev_foot = offset | IS_MMAPPED_BIT;
(p)->head = (psize|CINUSE_BIT);
mark_inuse_foot(m, p, psize);
/* Fake trailing chunk keeps the PINUSE invariant checkable. */
chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
if (mm < m->least_addr)
m->least_addr = mm;
if ((m->footprint += mmsize) > m->max_footprint)
m->max_footprint = m->footprint;
assert(is_aligned(chunk2mem(p)));
check_mmapped_chunk(m, p);
return chunk2mem(p);
}
}
return 0;
}
/* Realloc using mmap */
/*
  Resize a directly-mmapped chunk oldp to hold nb bytes.  Returns oldp
  unchanged when the existing mapping is big enough but not wastefully
  large, a new chunk after a successful MREMAP (re-writing header and
  fenceposts at the new size), or 0 when resizing is not possible
  (request shrunk below small size, or MREMAP failed).
*/
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
size_t oldsize = chunksize(oldp);
if (is_small(nb)) /* Can't shrink mmap regions below small size */
return 0;
/* Keep old chunk if big enough but not too big */
if (oldsize >= nb + SIZE_T_SIZE &&
(oldsize - nb) <= (mparams.granularity << 1))
return oldp;
else {
/* Recover the original mapping base/length from the stored offset. */
size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
CHUNK_ALIGN_MASK);
char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
oldmmsize, newmmsize, 1);
if (cp != CMFAIL) {
mchunkptr newp = (mchunkptr)(cp + offset);
size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
newp->head = (psize|CINUSE_BIT);
mark_inuse_foot(m, newp, psize);
chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
if (cp < m->least_addr)
m->least_addr = cp;
if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
m->max_footprint = m->footprint;
check_mmapped_chunk(m, newp);
return newp;
}
}
return 0;
}
/* -------------------------- mspace management -------------------------- */ |
/* Initialize top chunk and its size */ |
static void init_top(mstate m, mchunkptr p, size_t psize) { |
/* Ensure alignment */ |
size_t offset = align_offset(chunk2mem(p)); |
p = (mchunkptr)((char*)p + offset); |
psize -= offset; |
m->top = p; |
m->topsize = psize; |
p->head = psize | PINUSE_BIT; |
/* set size of fake trailing chunk holding overhead space only once */ |
chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; |
m->trim_check = mparams.trim_threshold; /* reset on each update */ |
} |
/* Initialize bins for a new mstate that is otherwise zeroed out */ |
static void init_bins(mstate m) { |
/* Establish circular links for smallbins */ |
bindex_t i; |
for (i = 0; i < NSMALLBINS; ++i) { |
sbinptr bin = smallbin_at(m,i); |
bin->fd = bin->bk = bin; |
} |
} |
#if PROCEED_ON_ERROR
/* default corruption action */
/*
  Reset mstate m to forget all memory after detected corruption, so the
  allocator can limp on (PROCEED_ON_ERROR builds only).  Counts the
  event, zeroes top/dv/segment bookkeeping, clears all treebins, and
  re-circularizes the smallbins.
*/
static void reset_on_error(mstate m) {
int i;
++malloc_corruption_error_count;
/* Reinitialize fields to forget about all memory */
m->smallbins = m->treebins = 0;
m->dvsize = m->topsize = 0;
m->seg.base = 0;
m->seg.size = 0;
m->seg.next = 0;
m->top = m->dv = 0;
for (i = 0; i < NTREEBINS; ++i)
*treebin_at(m, i) = 0;
init_bins(m);
}
#endif /* PROCEED_ON_ERROR */
/* Allocate chunk and prepend remainder with chunk in successor base. */
/*
  Carve an nb-byte in-use chunk p from the front of newbase (a region
  just mapped in front of an existing segment starting at oldbase), and
  coalesce the leftover space q with the first chunk of the old base:
  merged into top if that chunk is top, into dv if it is dv, otherwise
  consolidated with it (if free) and binned.  Returns the user pointer
  for p.
*/
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
size_t nb) {
mchunkptr p = align_as_chunk(newbase);
mchunkptr oldfirst = align_as_chunk(oldbase);
size_t psize = (char*)oldfirst - (char*)p;
mchunkptr q = chunk_plus_offset(p, nb);
size_t qsize = psize - nb;
set_size_and_pinuse_of_inuse_chunk(m, p, nb);
assert((char*)oldfirst > (char*)q);
assert(pinuse(oldfirst));
assert(qsize >= MIN_CHUNK_SIZE);
/* consolidate remainder with first chunk of old base */
if (oldfirst == m->top) {
size_t tsize = m->topsize += qsize;
m->top = q;
q->head = tsize | PINUSE_BIT;
check_top_chunk(m, q);
}
else if (oldfirst == m->dv) {
size_t dsize = m->dvsize += qsize;
m->dv = q;
set_size_and_pinuse_of_free_chunk(q, dsize);
}
else {
if (!cinuse(oldfirst)) {
/* oldfirst is free: absorb it into the remainder before binning. */
size_t nsize = chunksize(oldfirst);
unlink_chunk(m, oldfirst, nsize);
oldfirst = chunk_plus_offset(oldfirst, nsize);
qsize += nsize;
}
set_free_with_pinuse(q, qsize, oldfirst);
insert_chunk(m, q, qsize);
check_free_chunk(m, q);
}
check_malloced_chunk(m, chunk2mem(p), nb);
return chunk2mem(p);
}
/* Add a segment to hold a new noncontiguous region */
/*
  Install [tbase, tbase+tsize) as a new segment: the new region becomes
  top, the malloc_segment record is embedded in an in-use chunk at the
  tail of the OLD top segment (pushing the previous head record), the
  old segment's tail is sealed with fencepost headers, and any unusable
  remainder of the old top is freed into a bin.
*/
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
/* Determine locations and sizes of segment, fenceposts, old top */
char* old_top = (char*)m->top;
msegmentptr oldsp = segment_holding(m, old_top);
char* old_end = oldsp->base + oldsp->size;
size_t ssize = pad_request(sizeof(struct malloc_segment));
char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
size_t offset = align_offset(chunk2mem(rawsp));
char* asp = rawsp + offset;
/* If the record would land within MIN_CHUNK_SIZE of old top, place it
   at old top itself so no sub-minimum fragment is created. */
char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
mchunkptr sp = (mchunkptr)csp;
msegmentptr ss = (msegmentptr)(chunk2mem(sp));
mchunkptr tnext = chunk_plus_offset(sp, ssize);
mchunkptr p = tnext;
int nfences = 0;
/* reset top to new space */
init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
/* Set up segment record */
assert(is_aligned(ss));
set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
*ss = m->seg; /* Push current record */
m->seg.base = tbase;
m->seg.size = tsize;
m->seg.sflags = mmapped;
m->seg.next = ss;
/* Insert trailing fenceposts */
for (;;) {
mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
p->head = FENCEPOST_HEAD;
++nfences;
if ((char*)(&(nextp->head)) < old_end)
p = nextp;
else
break;
}
assert(nfences >= 2);
/* Insert the rest of old top into a bin as an ordinary free chunk */
if (csp != old_top) {
mchunkptr q = (mchunkptr)old_top;
size_t psize = csp - old_top;
mchunkptr tn = chunk_plus_offset(q, psize);
set_free_with_pinuse(q, psize, tn);
insert_chunk(m, q, psize);
}
check_top_chunk(m, m->top);
}
/* -------------------------- System allocation -------------------------- */ |
/* Get memory from system using MORECORE or MMAP */
/*
  Obtain at least nb usable bytes from the system for mstate m and
  serve the request from the (new or extended) top chunk.  Very large
  requests are first tried via direct mmap (mmap_alloc).  Newly
  obtained space is either merged into an adjacent existing segment
  (appended, or prepended via prepend_alloc) or installed as a fresh
  segment (add_segment).  Returns the user pointer, or 0 after
  MALLOC_FAILURE_ACTION when all strategies fail.
*/
static void* sys_alloc(mstate m, size_t nb) {
char* tbase = CMFAIL;
size_t tsize = 0;
flag_t mmap_flag = 0;
init_mparams();
/* Directly map large chunks */
if (use_mmap(m) && nb >= mparams.mmap_threshold) {
void* mem = mmap_alloc(m, nb);
if (mem != 0)
return mem;
}
/*
Try getting memory in any of three ways (in most-preferred to
least-preferred order):
1. A call to MORECORE that can normally contiguously extend memory.
(disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
main space is mmapped or a previous contiguous call failed)
2. A call to MMAP new space (disabled if not HAVE_MMAP).
Note that under the default settings, if MORECORE is unable to
fulfill a request, and HAVE_MMAP is true, then mmap is
used as a noncontiguous system allocator. This is a useful backup
strategy for systems with holes in address spaces -- in this case
sbrk cannot contiguously expand the heap, but mmap may be able to
find space.
3. A call to MORECORE that cannot usually contiguously extend memory.
(disabled if not HAVE_MORECORE)
*/
if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
char* br = CMFAIL;
msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
size_t asize = 0;
ACQUIRE_MORECORE_LOCK();
if (ss == 0) { /* First time through or recovery */
char* base = (char*)CALL_MORECORE(0);
if (base != CMFAIL) {
asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
/* Adjust to end on a page boundary */
if (!is_page_aligned(base))
asize += (page_align((size_t)base) - (size_t)base);
/* Can't call MORECORE if size is negative when treated as signed */
if (asize < HALF_MAX_SIZE_T &&
(br = (char*)(CALL_MORECORE(asize))) == base) {
tbase = base;
tsize = asize;
}
}
}
else {
/* Subtract out existing available top space from MORECORE request. */
asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
/* Use mem here only if it did continuously extend old space */
if (asize < HALF_MAX_SIZE_T &&
(br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
tbase = br;
tsize = asize;
}
}
if (tbase == CMFAIL) { /* Cope with partial failure */
if (br != CMFAIL) { /* Try to use/extend the space we did get */
if (asize < HALF_MAX_SIZE_T &&
asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
if (esize < HALF_MAX_SIZE_T) {
char* end = (char*)CALL_MORECORE(esize);
if (end != CMFAIL)
asize += esize;
else { /* Can't use; try to release */
CALL_MORECORE(-asize);
br = CMFAIL;
}
}
}
}
if (br != CMFAIL) { /* Use the space we did get */
tbase = br;
tsize = asize;
}
else
disable_contiguous(m); /* Don't try contiguous path in the future */
}
RELEASE_MORECORE_LOCK();
}
if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
size_t rsize = granularity_align(req);
if (rsize > nb) { /* Fail if wraps around zero */
char* mp = (char*)(CALL_MMAP(rsize));
if (mp != CMFAIL) {
tbase = mp;
tsize = rsize;
mmap_flag = IS_MMAPPED_BIT;
}
}
}
if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
if (asize < HALF_MAX_SIZE_T) {
char* br = CMFAIL;
char* end = CMFAIL;
ACQUIRE_MORECORE_LOCK();
br = (char*)(CALL_MORECORE(asize));
end = (char*)(CALL_MORECORE(0));
RELEASE_MORECORE_LOCK();
if (br != CMFAIL && end != CMFAIL && br < end) {
size_t ssize = end - br;
if (ssize > nb + TOP_FOOT_SIZE) {
tbase = br;
tsize = ssize;
}
}
}
}
if (tbase != CMFAIL) {
if ((m->footprint += tsize) > m->max_footprint)
m->max_footprint = m->footprint;
if (!is_initialized(m)) { /* first-time initialization */
m->seg.base = m->least_addr = tbase;
m->seg.size = tsize;
m->seg.sflags = mmap_flag;
m->magic = mparams.magic;
init_bins(m);
if (is_global(m))
init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
else {
/* Offset top by embedded malloc_state */
mchunkptr mn = next_chunk(mem2chunk(m));
init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
}
}
else {
/* Try to merge with an existing segment */
msegmentptr sp = &m->seg;
while (sp != 0 && tbase != sp->base + sp->size)
sp = sp->next;
if (sp != 0 &&
!is_extern_segment(sp) &&
(sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
segment_holds(sp, m->top)) { /* append */
sp->size += tsize;
init_top(m, m->top, m->topsize + tsize);
}
else {
if (tbase < m->least_addr)
m->least_addr = tbase;
/* New space lies just BEFORE an existing segment: extend it down. */
sp = &m->seg;
while (sp != 0 && sp->base != tbase + tsize)
sp = sp->next;
if (sp != 0 &&
!is_extern_segment(sp) &&
(sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
char* oldbase = sp->base;
sp->base = tbase;
sp->size += tsize;
return prepend_alloc(m, tbase, oldbase, nb);
}
else
add_segment(m, tbase, tsize, mmap_flag);
}
}
if (nb < m->topsize) { /* Allocate from new or extended top space */
size_t rsize = m->topsize -= nb;
mchunkptr p = m->top;
mchunkptr r = m->top = chunk_plus_offset(p, nb);
r->head = rsize | PINUSE_BIT;
set_size_and_pinuse_of_inuse_chunk(m, p, nb);
check_top_chunk(m, m->top);
check_malloced_chunk(m, chunk2mem(p), nb);
return chunk2mem(p);
}
}
MALLOC_FAILURE_ACTION;
return 0;
}
/* ----------------------- system deallocation -------------------------- */ |
/* Unmap and unlink any mmapped segments that don't contain used chunks */
/*
  Walk the segment list once and CALL_MUNMAP every mmapped, non-extern
  segment whose single spanning chunk is free (and not pinned by the
  segment's own trailing overhead).  The free chunk is unlinked from
  dv or its treebin first, and re-inserted if the unmap fails.
  Returns the total number of bytes returned to the system.
*/
static size_t release_unused_segments(mstate m) {
size_t released = 0;
msegmentptr pred = &m->seg;
msegmentptr sp = pred->next;
while (sp != 0) {
char* base = sp->base;
size_t size = sp->size;
msegmentptr next = sp->next;
if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
mchunkptr p = align_as_chunk(base);
size_t psize = chunksize(p);
/* Can unmap if first chunk holds entire segment and not pinned */
if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
tchunkptr tp = (tchunkptr)p;
assert(segment_holds(sp, (char*)sp));
if (p == m->dv) {
m->dv = 0;
m->dvsize = 0;
}
else {
/* Chunks this large live in treebins. */
unlink_large_chunk(m, tp);
}
if (CALL_MUNMAP(base, size) == 0) {
released += size;
m->footprint -= size;
/* unlink obsoleted record: rewind sp to pred so the surviving
   predecessor is re-used as the next 'pred' below */
sp = pred;
sp->next = next;
}
else { /* back out if cannot unmap */
insert_large_chunk(m, tp, psize);
}
}
}
pred = sp;
sp = next;
}
return released;
}
/*
  Give back top-chunk memory beyond 'pad' bytes to the system, in
  granularity units, via MREMAP/MUNMAP for mmapped segments or a
  negative MORECORE for sbrk-style segments, then release any wholly
  unused mmapped segments.  On total failure, trim_check is set to
  MAX_SIZE_T to disable automatic trimming.  Returns 1 if any memory
  was released, else 0.
*/
static int sys_trim(mstate m, size_t pad) {
size_t released = 0;
if (pad < MAX_REQUEST && is_initialized(m)) {
pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
if (m->topsize > pad) {
/* Shrink top space in granularity-size units, keeping at least one */
size_t unit = mparams.granularity;
size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
SIZE_T_ONE) * unit;
msegmentptr sp = segment_holding(m, (char*)m->top);
if (!is_extern_segment(sp)) {
if (is_mmapped_segment(sp)) {
if (HAVE_MMAP &&
sp->size >= extra &&
!has_segment_link(m, sp)) { /* can't shrink if pinned */
size_t newsize = sp->size - extra;
/* Prefer mremap, fall back to munmap */
if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
(CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
released = extra;
}
}
}
else if (HAVE_MORECORE) {
if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
ACQUIRE_MORECORE_LOCK();
{
/* Make sure end of memory is where we last set it. */
char* old_br = (char*)(CALL_MORECORE(0));
if (old_br == sp->base + sp->size) {
char* rel_br = (char*)(CALL_MORECORE(-extra));
char* new_br = (char*)(CALL_MORECORE(0));
if (rel_br != CMFAIL && new_br < old_br)
released = old_br - new_br;
}
}
RELEASE_MORECORE_LOCK();
}
}
if (released != 0) {
sp->size -= released;
m->footprint -= released;
init_top(m, m->top, m->topsize - released);
check_top_chunk(m, m->top);
}
}
/* Unmap any unused mmapped segments */
if (HAVE_MMAP)
released += release_unused_segments(m);
/* On failure, disable autotrim to avoid repeated failed future calls */
if (released == 0)
m->trim_check = MAX_SIZE_T;
}
return (released != 0)? 1 : 0;
}
/* ---------------------------- malloc support --------------------------- */ |
/* allocate a large request from the best fitting chunk in a treebin */
/*
  Find the best-fitting tree chunk for request nb: walk this size's
  treebin by size bits remembering the deepest untaken right subtree,
  fall over to the next non-empty treebin if needed, then scan leftmost
  children for the smallest fit.  If the fit is no better than the
  current dv chunk, returns 0 so the caller uses dv instead.  On
  success the chunk is unlinked and split (remainder binned) unless the
  remainder would be below MIN_CHUNK_SIZE.
*/
static void* tmalloc_large(mstate m, size_t nb) {
tchunkptr v = 0;
size_t rsize = -nb; /* Unsigned negation */
tchunkptr t;
bindex_t idx;
compute_tree_index(nb, idx);
if ((t = *treebin_at(m, idx)) != 0) {
/* Traverse tree for this bin looking for node with size == nb */
size_t sizebits = nb << leftshift_for_tree_index(idx);
tchunkptr rst = 0; /* The deepest untaken right subtree */
for (;;) {
tchunkptr rt;
size_t trem = chunksize(t) - nb;
if (trem < rsize) {
v = t;
if ((rsize = trem) == 0)
break;
}
rt = t->child[1];
t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
if (rt != 0 && rt != t)
rst = rt;
if (t == 0) {
t = rst; /* set t to least subtree holding sizes > nb */
break;
}
sizebits <<= 1;
}
}
if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
if (leftbits != 0) {
bindex_t i;
binmap_t leastbit = least_bit(leftbits);
compute_bit2idx(leastbit, i);
t = *treebin_at(m, i);
}
}
while (t != 0) { /* find smallest of tree or subtree */
size_t trem = chunksize(t) - nb;
if (trem < rsize) {
rsize = trem;
v = t;
}
t = leftmost_child(t);
}
/* If dv is a better fit, return 0 so malloc will use it */
if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
if (RTCHECK(ok_address(m, v))) { /* split */
mchunkptr r = chunk_plus_offset(v, nb);
assert(chunksize(v) == rsize + nb);
if (RTCHECK(ok_next(v, r))) {
unlink_large_chunk(m, v);
if (rsize < MIN_CHUNK_SIZE)
set_inuse_and_pinuse(m, v, (rsize + nb));
else {
set_size_and_pinuse_of_inuse_chunk(m, v, nb);
set_size_and_pinuse_of_free_chunk(r, rsize);
insert_chunk(m, r, rsize);
}
return chunk2mem(v);
}
}
CORRUPTION_ERROR_ACTION(m);
}
return 0;
}
/* allocate a small request from the best fitting chunk in a treebin */
/*
  Serve a small request nb from the treebins when no smallbin or dv
  chunk fits.  Caller must ensure m->treemap is non-empty; the lowest
  non-empty treebin is used, and its leftmost descent finds the
  smallest chunk.  The chunk is unlinked and split, with any remainder
  of at least MIN_CHUNK_SIZE becoming the new dv (replace_dv).
*/
static void* tmalloc_small(mstate m, size_t nb) {
tchunkptr t, v;
size_t rsize;
bindex_t i;
binmap_t leastbit = least_bit(m->treemap);
compute_bit2idx(leastbit, i);
v = t = *treebin_at(m, i);
rsize = chunksize(t) - nb;
while ((t = leftmost_child(t)) != 0) {
size_t trem = chunksize(t) - nb;
if (trem < rsize) {
rsize = trem;
v = t;
}
}
if (RTCHECK(ok_address(m, v))) {
mchunkptr r = chunk_plus_offset(v, nb);
assert(chunksize(v) == rsize + nb);
if (RTCHECK(ok_next(v, r))) {
unlink_large_chunk(m, v);
if (rsize < MIN_CHUNK_SIZE)
set_inuse_and_pinuse(m, v, (rsize + nb));
else {
set_size_and_pinuse_of_inuse_chunk(m, v, nb);
set_size_and_pinuse_of_free_chunk(r, rsize);
replace_dv(m, r, rsize);
}
return chunk2mem(v);
}
}
CORRUPTION_ERROR_ACTION(m);
return 0;
}
/* --------------------------- realloc support --------------------------- */
/*
  Reallocate oldmem (a live chunk in m) to hold 'bytes'.  In-place
  paths: mmap_resize for mmapped chunks, shrink-with-split when the old
  chunk is already big enough, or expansion into an adjacent top chunk.
  Otherwise falls back to malloc + memcpy + free.  A chunk that fails
  the sanity checks triggers USAGE_ERROR_ACTION and returns 0.
*/
static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
if (bytes >= MAX_REQUEST) {
MALLOC_FAILURE_ACTION;
return 0;
}
if (!PREACTION(m)) {
mchunkptr oldp = mem2chunk(oldmem);
size_t oldsize = chunksize(oldp);
mchunkptr next = chunk_plus_offset(oldp, oldsize);
mchunkptr newp = 0;
void* extra = 0;
/* Try to either shrink or extend into top. Else malloc-copy-free */
if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
ok_next(oldp, next) && ok_pinuse(next))) {
size_t nb = request2size(bytes);
if (is_mmapped(oldp))
newp = mmap_resize(m, oldp, nb);
else if (oldsize >= nb) { /* already big enough */
size_t rsize = oldsize - nb;
newp = oldp;
if (rsize >= MIN_CHUNK_SIZE) {
/* Split off the tail; it is freed below via internal_free. */
mchunkptr remainder = chunk_plus_offset(newp, nb);
set_inuse(m, newp, nb);
set_inuse(m, remainder, rsize);
extra = chunk2mem(remainder);
}
}
else if (next == m->top && oldsize + m->topsize > nb) {
/* Expand into top */
size_t newsize = oldsize + m->topsize;
size_t newtopsize = newsize - nb;
mchunkptr newtop = chunk_plus_offset(oldp, nb);
set_inuse(m, oldp, nb);
newtop->head = newtopsize |PINUSE_BIT;
m->top = newtop;
m->topsize = newtopsize;
newp = oldp;
}
}
else {
USAGE_ERROR_ACTION(m, oldmem);
POSTACTION(m);
return 0;
}
POSTACTION(m);
if (newp != 0) {
if (extra != 0) {
internal_free(m, extra);
}
check_inuse_chunk(m, newp);
return chunk2mem(newp);
}
else {
void* newmem = internal_malloc(m, bytes);
if (newmem != 0) {
/* Copy only the usable payload of the old chunk. */
size_t oc = oldsize - overhead_for(oldp);
memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
internal_free(m, oldmem);
}
return newmem;
}
}
return 0;
}
/* --------------------------- memalign support -------------------------- */ |
/*
  Allocate 'bytes' with payload aligned to 'alignment'.  Alignments at
  or below MALLOC_ALIGNMENT relay to plain malloc; others are rounded
  up to a power of two of at least MIN_CHUNK_SIZE.  Over-allocates by
  alignment + MIN_CHUNK_SIZE, then carves: a leader chunk before the
  first suitably-aligned spot (freed back) and a trailer chunk for any
  spare room at the end (also freed back).  mmapped chunks are adjusted
  by offset only, with nothing given back.
*/
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
return internal_malloc(m, bytes);
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
alignment = MIN_CHUNK_SIZE;
if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
size_t a = MALLOC_ALIGNMENT << 1;
while (a < alignment) a <<= 1;
alignment = a;
}
if (bytes >= MAX_REQUEST - alignment) {
if (m != 0) { /* Test isn't needed but avoids compiler warning */
MALLOC_FAILURE_ACTION;
}
}
else {
size_t nb = request2size(bytes);
size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
char* mem = (char*)internal_malloc(m, req);
if (mem != 0) {
void* leader = 0;
void* trailer = 0;
mchunkptr p = mem2chunk(mem);
if (PREACTION(m)) return 0;
if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
/*
Find an aligned spot inside chunk. Since we need to give
back leading space in a chunk of at least MIN_CHUNK_SIZE, if
the first calculation places us at a spot with less than
MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
We've allocated enough total room so that this is always
possible.
*/
char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
alignment -
SIZE_T_ONE)) &
-alignment));
char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
br : br+alignment;
mchunkptr newp = (mchunkptr)pos;
size_t leadsize = pos - (char*)(p);
size_t newsize = chunksize(p) - leadsize;
if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
newp->prev_foot = p->prev_foot + leadsize;
newp->head = (newsize|CINUSE_BIT);
}
else { /* Otherwise, give back leader, use the rest */
set_inuse(m, newp, newsize);
set_inuse(m, p, leadsize);
leader = chunk2mem(p);
}
p = newp;
}
/* Give back spare room at the end */
if (!is_mmapped(p)) {
size_t size = chunksize(p);
if (size > nb + MIN_CHUNK_SIZE) {
size_t remainder_size = size - nb;
mchunkptr remainder = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, remainder, remainder_size);
trailer = chunk2mem(remainder);
}
}
assert (chunksize(p) >= nb);
assert((((size_t)(chunk2mem(p))) % alignment) == 0);
check_inuse_chunk(m, p);
POSTACTION(m);
if (leader != 0) {
internal_free(m, leader);
}
if (trailer != 0) {
internal_free(m, trailer);
}
return chunk2mem(p);
}
}
return 0;
}
/* ------------------------ comalloc/coalloc support --------------------- */ |
/*
  Common backend for independent_calloc/independent_comalloc: allocate
  n_elements chunks in one aggregate allocation and return an array of
  their user pointers.  Direct mmap is temporarily disabled so the
  aggregate can later be split and freed piecewise.  The pointer array
  is either caller-supplied ('chunks') or carved from the tail of the
  aggregate chunk.  Returns the pointer array, or 0 on failure.
  opts bit 0: all elements share size sizes[0]; bit 1: zero elements.
*/
static void** ialloc(mstate m,
size_t n_elements,
size_t* sizes,
int opts,
void* chunks[]) {
/*
This provides common support for independent_X routines, handling
all of the combinations that can result.
The opts arg has:
bit 0 set if all elements are same size (using sizes[0])
bit 1 set if elements should be zeroed
*/
size_t element_size; /* chunksize of each element, if all same */
size_t contents_size; /* total size of elements */
size_t array_size; /* request size of pointer array */
void* mem; /* malloced aggregate space */
mchunkptr p; /* corresponding chunk */
size_t remainder_size; /* remaining bytes while splitting */
void** marray; /* either "chunks" or malloced ptr array */
mchunkptr array_chunk; /* chunk for malloced ptr array */
flag_t was_enabled; /* to disable mmap */
size_t size;
size_t i;
/* compute array length, if needed */
if (chunks != 0) {
if (n_elements == 0)
return chunks; /* nothing to do */
marray = chunks;
array_size = 0;
}
else {
/* if empty req, must still return chunk representing empty array */
if (n_elements == 0)
return (void**)internal_malloc(m, 0);
marray = 0;
array_size = request2size(n_elements * (sizeof(void*)));
}
/* compute total element size */
if (opts & 0x1) { /* all-same-size */
element_size = request2size(*sizes);
contents_size = n_elements * element_size;
}
else { /* add up all the sizes */
element_size = 0;
contents_size = 0;
for (i = 0; i != n_elements; ++i)
contents_size += request2size(sizes[i]);
}
size = contents_size + array_size;
/*
Allocate the aggregate chunk. First disable direct-mmapping so
malloc won't use it, since we would not be able to later
free/realloc space internal to a segregated mmap region.
*/
was_enabled = use_mmap(m);
disable_mmap(m);
mem = internal_malloc(m, size - CHUNK_OVERHEAD);
if (was_enabled)
enable_mmap(m);
if (mem == 0)
return 0;
if (PREACTION(m)) return 0;
p = mem2chunk(mem);
remainder_size = chunksize(p);
assert(!is_mmapped(p));
if (opts & 0x2) { /* optionally clear the elements */
memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
}
/* If not provided, allocate the pointer array as final part of chunk */
if (marray == 0) {
size_t array_chunk_size;
array_chunk = chunk_plus_offset(p, contents_size);
array_chunk_size = remainder_size - contents_size;
marray = (void**) (chunk2mem(array_chunk));
set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
remainder_size = contents_size;
}
/* split out elements */
for (i = 0; ; ++i) {
marray[i] = chunk2mem(p);
if (i != n_elements-1) {
if (element_size != 0)
size = element_size;
else
size = request2size(sizes[i]);
remainder_size -= size;
set_size_and_pinuse_of_inuse_chunk(m, p, size);
p = chunk_plus_offset(p, size);
}
else { /* the final element absorbs any overallocation slop */
set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
break;
}
}
#if DEBUG
if (marray != chunks) {
/* final element must have exactly exhausted chunk */
if (element_size != 0) {
assert(remainder_size == element_size);
}
else {
assert(remainder_size == request2size(sizes[i]));
}
check_inuse_chunk(m, mem2chunk(marray));
}
for (i = 0; i != n_elements; ++i)
check_inuse_chunk(m, mem2chunk(marray[i]));
#endif /* DEBUG */
POSTACTION(m);
return marray;
}
/* -------------------------- public routines ---------------------------- */ |
#if !ONLY_MSPACES |
void* dlmalloc(size_t bytes) {
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it
     The ugly goto's here ensure that postaction occurs along all paths.
  */
  if (!PREACTION(gm)) {              /* acquire lock (no-op if single-threaded) */
    void* mem;
    size_t nb;                       /* padded (internal) request size */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      /* bit i of smallbits: smallbin idx+i is nonempty */
      smallbits = gm->smallmap >> idx;
      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
      else if (nb > gm->dvsize) {    /* dv can't serve it; search higher bins */
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          /* isolate lowest set bit at or above idx, then map bit -> index */
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            /* split: leading part is returned, trailing part becomes new dv */
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      /* large request: best-fit search in the tree bins first */
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }
    /* fallbacks shared by small and large paths: dv, then top, then system */
    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }
    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }
    /* last resort: grow (or mmap) via the system allocator */
    mem = sys_alloc(gm, nb);
  postaction:
    POSTACTION(gm);
    return mem;
  }
  return 0;
}
void dlfree(void* mem) {
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin. Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    /* With footers, recover the owning mstate from the chunk itself and
       validate its magic before trusting it. */
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {           /* previous chunk is free or mmapped */
          size_t prevsize = p->prev_foot;
          if ((prevsize & IS_MMAPPED_BIT) != 0) {
            /* directly-mmapped chunk: unmap the whole region */
            prevsize &= ~IS_MMAPPED_BIT;
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* merged into dv and next is in use: just grow dv and stop */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }
        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) { /* consolidate forward */
            if (next == fm->top) {
              /* absorb into top; trim back to the system if top grew large */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              /* absorb dv; merged chunk becomes the new dv */
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}
void* dlcalloc(size_t n_elements, size_t elem_size) { |
void* mem; |
size_t req = 0; |
if (n_elements != 0) { |
req = n_elements * elem_size; |
if (((n_elements | elem_size) & ~(size_t)0xffff) && |
(req / n_elements != elem_size)) |
req = MAX_SIZE_T; /* force downstream failure on overflow */ |
} |
mem = dlmalloc(req); |
if (mem != 0 && calloc_must_clear(mem2chunk(mem))) |
memset(mem, 0, req); |
return mem; |
} |
void* dlrealloc(void* oldmem, size_t bytes) {
  /* Resize oldmem to `bytes`.  A NULL oldmem degenerates to malloc;
     with REALLOC_ZERO_BYTES_FREES defined, a zero size frees instead. */
  if (oldmem == 0)
    return dlmalloc(bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  /* NOTE: when this branch is compiled in, the `else` below binds to
     THIS `if`; otherwise it binds to the NULL check above.  Either way
     the resize path below runs only for a non-trivial request. */
  if (bytes == 0) {
    dlfree(oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    /* Recover and validate the owning mstate from the chunk footer. */
    mstate m = get_mstate_for(mem2chunk(oldmem));
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    return internal_realloc(m, oldmem, bytes);
  }
}
void* dlmemalign(size_t alignment, size_t bytes) {
  /* Allocate `bytes` with a start address that is a multiple of
     `alignment`; all work is delegated to internal_memalign on gm. */
  return internal_memalign(gm, alignment, bytes);
}
void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]) {
  /* Allocate n_elements chunks of the same size in one pass.
     opts = 3: bit 0 tells ialloc that `sz` is one common size rather
     than an array; bit 1 asks for the memory to be cleared. */
  size_t sz = elem_size; /* serves as 1-element array */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}
void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]) {
  /* Allocate n_elements chunks with individually-given sizes.
     opts = 0: sizes[] is a full per-element array, no clearing. */
  return ialloc(gm, n_elements, sizes, 0, chunks);
}
void* dlvalloc(size_t bytes) {
  /* Allocate `bytes` aligned on a system page boundary. */
  init_mparams();                     /* ensure mparams.page_size is set */
  return dlmemalign(mparams.page_size, bytes);
}
void* dlpvalloc(size_t bytes) {
  /* Page-aligned allocation with the size rounded up to a whole
     number of pages. */
  size_t pagesz;
  size_t rounded;
  init_mparams();                     /* ensure mparams.page_size is set */
  pagesz = mparams.page_size;
  /* round up to the next multiple of the (power-of-two) page size */
  rounded = (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE);
  return dlmemalign(pagesz, rounded);
}
int dlmalloc_trim(size_t pad) {
  /* Try to give unused top-of-heap memory back to the system, keeping
     at least `pad` bytes of headroom.  Returns sys_trim's result
     (nonzero when something was released), 0 if the lock was busy. */
  int result = 0;
  if (!PREACTION(gm)) {
    result = sys_trim(gm, pad);
    POSTACTION(gm);
  }
  return result;
}
size_t dlmalloc_footprint(void) {
  /* Bytes currently obtained from the system for the global mstate. */
  return gm->footprint;
}
size_t dlmalloc_max_footprint(void) {
  /* High-water mark of bytes ever obtained from the system. */
  return gm->max_footprint;
}
#if !NO_MALLINFO
struct mallinfo dlmallinfo(void) {
  /* mallinfo-style snapshot of allocation statistics for gm. */
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */
//void dlmalloc_stats() { |
// internal_malloc_stats(gm); |
//} |
size_t dlmalloc_usable_size(void* mem) { |
if (mem != 0) { |
mchunkptr p = mem2chunk(mem); |
if (cinuse(p)) |
return chunksize(p) - overhead_for(p); |
} |
return 0; |
} |
int dlmallopt(int param_number, int value) {
  /* mallopt-compatible tuning entry point; forwards to change_mparam. */
  return change_mparam(param_number, value);
}
#endif /* !ONLY_MSPACES */ |
/* ----------------------------- user mspaces ---------------------------- */ |
#if MSPACES |
#endif /* MSPACES */ |
/* -------------------- Alternative MORECORE functions ------------------- */ |
/* |
Guidelines for creating a custom version of MORECORE: |
* For best performance, MORECORE should allocate in multiples of pagesize. |
* MORECORE may allocate more memory than requested. (Or even less, |
but this will usually result in a malloc failure.) |
* MORECORE must not allocate memory when given argument zero, but |
instead return one past the end address of memory from previous |
nonzero call. |
* For best performance, consecutive calls to MORECORE with positive |
arguments should return increasing addresses, indicating that |
space has been contiguously extended. |
* Even though consecutive calls to MORECORE need not return contiguous |
addresses, it must be OK for malloc'ed chunks to span multiple |
regions in those cases where they do happen to be contiguous. |
* MORECORE need not handle negative arguments -- it may instead |
just return MFAIL when given negative arguments. |
Negative arguments are always multiples of pagesize. MORECORE |
must not misinterpret negative args as large positive unsigned |
args. You can suppress all such calls from even occurring by defining |
MORECORE_CANNOT_TRIM.
As an example alternative MORECORE, here is a custom allocator |
kindly contributed for pre-OSX macOS. It uses virtually but not |
necessarily physically contiguous non-paged memory (locked in, |
present and won't get swapped out). You can use it by uncommenting |
this section, adding some #includes, and setting up the appropriate |
defines above: |
#define MORECORE osMoreCore |
There is also a shutdown routine that should somehow be called for |
cleanup upon program exit. |
#define MAX_POOL_ENTRIES 100 |
#define MINIMUM_MORECORE_SIZE (64 * 1024U) |
static int next_os_pool; |
void *our_os_pools[MAX_POOL_ENTRIES]; |
void *osMoreCore(int size) |
{ |
void *ptr = 0; |
static void *sbrk_top = 0; |
if (size > 0) |
{ |
if (size < MINIMUM_MORECORE_SIZE) |
size = MINIMUM_MORECORE_SIZE; |
if (CurrentExecutionLevel() == kTaskLevel) |
ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); |
if (ptr == 0) |
{ |
return (void *) MFAIL; |
} |
// save ptrs so they can be freed during cleanup |
our_os_pools[next_os_pool] = ptr; |
next_os_pool++; |
ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); |
sbrk_top = (char *) ptr + size; |
return ptr; |
} |
else if (size < 0) |
{ |
// we don't currently support shrink behavior |
return (void *) MFAIL; |
} |
else |
{ |
return sbrk_top; |
} |
} |
// cleanup any allocated memory pools |
// called as last thing before shutting down driver |
void osCleanupMem(void) |
{ |
void **ptr; |
for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) |
if (*ptr) |
{ |
PoolDeallocate(*ptr); |
*ptr = 0; |
} |
} |
*/ |
/* ----------------------------------------------------------------------- |
History: |
V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) |
* Add max_footprint functions |
* Ensure all appropriate literals are size_t |
* Fix conditional compilation problem for some #define settings |
* Avoid concatenating segments with the one provided |
in create_mspace_with_base |
* Rename some variables to avoid compiler shadowing warnings |
* Use explicit lock initialization. |
* Better handling of sbrk interference. |
* Simplify and fix segment insertion, trimming and mspace_destroy |
* Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x |
* Thanks especially to Dennis Flanagan for help on these. |
V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) |
* Fix memalign brace error. |
V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) |
* Fix improper #endif nesting in C++ |
* Add explicit casts needed for C++ |
V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) |
* Use trees for large bins |
* Support mspaces |
* Use segments to unify sbrk-based and mmap-based system allocation, |
removing need for emulation on most platforms without sbrk. |
* Default safety checks |
* Optional footer checks. Thanks to William Robertson for the idea. |
* Internal code refactoring |
* Incorporate suggestions and platform-specific changes. |
Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, |
Aaron Bachmann, Emery Berger, and others. |
* Speed up non-fastbin processing enough to remove fastbins. |
* Remove useless cfree() to avoid conflicts with other apps. |
* Remove internal memcpy, memset. Compilers handle builtins better. |
* Remove some options that no one ever used and rename others. |
V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) |
* Fix malloc_state bitmap array misdeclaration |
V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) |
* Allow tuning of FIRST_SORTED_BIN_SIZE |
* Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. |
* Better detection and support for non-contiguousness of MORECORE. |
Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger |
* Bypass most of malloc if no frees. Thanks To Emery Berger. |
* Fix freeing of old top non-contiguous chunk in sysmalloc.
* Raised default trim and map thresholds to 256K. |
* Fix mmap-related #defines. Thanks to Lubos Lunak. |
* Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. |
* Branch-free bin calculation |
* Default trim and mmap thresholds now 256K. |
V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) |
* Introduce independent_comalloc and independent_calloc. |
Thanks to Michael Pachos for motivation and help. |
* Make optional .h file available |
* Allow > 2GB requests on 32bit systems. |
* new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>. |
Thanks also to Andreas Mueller <a.mueller at paradatec.de>, |
and Anonymous. |
* Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for |
helping test this.) |
* memalign: check alignment arg |
* realloc: don't try to shift chunks backwards, since this |
leads to more fragmentation in some programs and doesn't |
seem to help in any others. |
* Collect all cases in malloc requiring system memory into sysmalloc |
* Use mmap as backup to sbrk |
* Place all internal state in malloc_state |
* Introduce fastbins (although similar to 2.5.1) |
* Many minor tunings and cosmetic improvements |
* Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK |
* Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS |
Thanks to Tony E. Bennett <tbennett@nvidia.com> and others. |
* Include errno.h to support default failure action. |
V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) |
* return null for negative arguments |
* Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com> |
* Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' |
(e.g. WIN32 platforms) |
* Cleanup header file inclusion for WIN32 platforms |
* Cleanup code to avoid Microsoft Visual C++ compiler complaints |
* Add 'USE_DL_PREFIX' to quickly allow co-existence with existing |
memory allocation routines |
* Set 'malloc_getpagesize' for WIN32 platforms (needs more work) |
* Use 'assert' rather than 'ASSERT' in WIN32 code to conform to |
usage of 'assert' in non-WIN32 code |
* Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to |
avoid infinite loop |
* Always call 'fREe()' rather than 'free()' |
V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) |
* Fixed ordering problem with boundary-stamping |
V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
* Added pvalloc, as recommended by H.J. Liu |
* Added 64bit pointer support mainly from Wolfram Gloger |
* Added anonymously donated WIN32 sbrk emulation |
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
* malloc_extend_top: fix mask error that caused wastage after |
foreign sbrks |
* Add linux mremap support code from HJ Liu |
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
* Integrated most documentation with the code. |
* Add support for mmap, with help from |
Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Use last_remainder in more cases. |
* Pack bins using idea from colin@nyx10.cs.du.edu |
* Use ordered bins instead of best-fit threshold
* Eliminate block-local decls to simplify tracing and debugging. |
* Support another case of realloc via move into top |
* Fix error occurring when initial sbrk_base not word-aligned.
* Rely on page size for units instead of SBRK_UNIT to |
avoid surprises about sbrk alignment conventions. |
* Add mallinfo, mallopt. Thanks to Raymond Nijssen |
(raymond@es.ele.tue.nl) for the suggestion. |
* Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
* More precautions for cases where other routines call sbrk, |
courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Added macros etc., allowing use in linux libc from |
H.J. Lu (hjl@gnu.ai.mit.edu) |
* Inverted this history list |
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
* Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
* Removed all preallocation code since under current scheme |
the work required to undo bad preallocations exceeds |
the work saved in good cases for most test programs. |
* No longer use return list or unconsolidated bins since |
no scheme using them consistently outperforms those that don't |
given above changes. |
* Use best fit for very large chunks to prevent some worst-cases. |
* Added some support for debugging |
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
* Removed footers when chunks are in use. Thanks to |
Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
* Added malloc_trim, with help from Wolfram Gloger |
(wmglo@Dent.MED.Uni-Muenchen.DE). |
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
* realloc: try to expand in both directions |
* malloc: swap order of clean-bin strategy; |
* realloc: only conditionally expand backwards |
* Try not to scavenge used bins |
* Use bin counts as a guide to preallocation |
* Occasionally bin return list chunks in first scan |
* Add a few optimizations from colin@nyx10.cs.du.edu |
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
* faster bin computation & slightly different binning |
* merged all consolidations to one part of malloc proper |
(eliminating old malloc_find_space & malloc_clean_bin) |
* Scan 2 returns chunks (not just 1) |
* Propagate failure in realloc if malloc returns 0 |
* Add stuff to allow compilation on non-ANSI compilers |
from kpv@research.att.com |
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
* removed potential for odd address access in prev_chunk |
* removed dependency on getpagesize.h |
* misc cosmetics and a bit more internal documentation |
* anticosmetics: mangled names in macros to evade debugger strangeness |
* tested on sparc, hp-700, dec-mips, rs6000 |
with gcc & native cc (hp, dec only) allowing |
Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
* Based loosely on libg++-1.2X malloc. (It retains some of the overall |
structure of old version, but most details differ.) |
*/ |
/drivers/ddk/stdio/chartab.c |
---|
0,0 → 1,261 |
#include "ctype.h" |
char __ctype[] = { |
0, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C|_S, |
_C|_S, |
_C|_S, |
_C|_S, |
_C|_S, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_C, |
_S, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_N, |
_N, |
_N, |
_N, |
_N, |
_N, |
_N, |
_N, |
_N, |
_N, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_U|_X, |
_U|_X, |
_U|_X, |
_U|_X, |
_U|_X, |
_U|_X, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_U, |
_P, |
_P, |
_P, |
_P, |
_P, |
_P, |
_L|_X, |
_L|_X, |
_L|_X, |
_L|_X, |
_L|_X, |
_L|_X, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_L, |
_P, |
_P, |
_P, |
_P, |
_C, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
0, |
}; |
/drivers/ddk/stdio/ctype.h |
---|
0,0 → 1,44 |
/* The <ctype.h> header file defines some macros used to identify characters. |
* It works by using a table stored in chartab.c. When a character is presented |
* to one of these macros, the character is used as an index into the table |
* (__ctype) to retrieve a byte. The relevant bit is then extracted. |
*/ |
#ifndef _CTYPE_H
#define _CTYPE_H
extern char __ctype[]; /* property array defined in chartab.c */
/* Classification bits stored per character in __ctype. */
#define _U 0x01 /* this bit is for upper-case letters [A-Z] */
#define _L 0x02 /* this bit is for lower-case letters [a-z] */
#define _N 0x04 /* this bit is for numbers [0-9] */
#define _S 0x08 /* this bit is for white space \t \n \f etc */
#define _P 0x10 /* this bit is for punctuation characters */
#define _C 0x20 /* this bit is for control characters */
#define _X 0x40 /* this bit is for hex digits [a-f] and [A-F]*/
/* Macros for identifying character classes.  Each one indexes the
 * table at c+1, so EOF (-1) safely hits the extra leading 0 slot.
 * NOTE(review): values outside -1..255 index out of bounds — callers
 * are expected to pass chars or EOF only. */
#define isalnum(c) ((__ctype+1)[c]&(_U|_L|_N))
#define isalpha(c) ((__ctype+1)[c]&(_U|_L))
#define iscntrl(c) ((__ctype+1)[c]&_C)
#define isgraph(c) ((__ctype+1)[c]&(_P|_U|_L|_N))
#define ispunct(c) ((__ctype+1)[c]&_P)
#define isspace(c) ((__ctype+1)[c]&_S)
#define isxdigit(c) ((__ctype+1)[c]&(_N|_X))
/* These classes are computed arithmetically instead of via the table. */
#define isdigit(c) ((unsigned) ((c)-'0') < 10)
#define islower(c) ((unsigned) ((c)-'a') < 26)
#define isupper(c) ((unsigned) ((c)-'A') < 26)
#define isprint(c) ((unsigned) ((c)-' ') < 95)
#define isascii(c) ((unsigned) (c) < 128)
#define toascii(c) ((c) & 0x7f)
/* Convert a lower-case ASCII letter to upper case; any other value is
 * returned unchanged. */
static inline int toupper(int c)
{
	if (!islower(c))
		return c;
	return c - ('a' - 'A');
}
#endif /* _CTYPE_H */ |
/drivers/ddk/stdio/doprnt.c |
---|
0,0 → 1,315 |
/* |
* doprnt.c - print formatted output |
*/ |
/* $Header$ */ |
#include "ctype.h" |
#include "stdio.h" |
#include <stdarg.h> |
#include <string.h> |
#include "loc_incl.h" |
#define NOFLOAT |
/* Minimal buffered putc: store into the stream buffer while _count
 * permits, yielding the stored char; EOF once the buffer is full.
 * Matches the fake FILE that vsnprintf() builds on the stack. */
#define putc(c, p) (--(p)->_count >= 0 ? \
	(int) (*(p)->_ptr++ = (c)) : EOF)
/* gnum() is used to get the width and precision fields of a format. */ |
/* gnum() is used to get the width and precision fields of a format.
 * Either consumes a '*' (value comes from the next int argument) or a
 * run of decimal digits; stores the value via ip and returns the first
 * unconsumed format character. */
static const char *
gnum(register const char *f, int *ip, va_list *app)
{
	register int val, digit;

	if (*f == '*') {
		/* field value supplied as an int argument */
		*ip = va_arg((*app), int);
		return f + 1;
	}
	val = 0;
	for (;;) {
		digit = *f - '0';
		if (digit < 0 || digit > 9)
			break;
		val = val * 10 + digit;
		f++;
	}
	*ip = val;
	return f;
}
/* set_pointer(flags) widens the conversion flags so a %p argument is
 * fetched with the same width as a pointer on this target. */
#if _EM_WSIZE == _EM_PSIZE
#define set_pointer(flags) /* nothing */
#elif _EM_LSIZE == _EM_PSIZE
#define set_pointer(flags) (flags |= FL_LONG)
#else
#error garbage pointer size
#define set_pointer(flags) /* compilation might continue */
#endif
/* print an ordinal number */ |
static char * |
o_print(va_list *ap, int flags, char *s, char c, int precision, int is_signed) |
{ |
long signed_val; |
unsigned long unsigned_val; |
char *old_s = s; |
int base; |
switch (flags & (FL_SHORT | FL_LONG)) { |
case FL_SHORT: |
if (is_signed) { |
signed_val = (short) va_arg(*ap, int); |
} else { |
unsigned_val = (unsigned short) va_arg(*ap, unsigned); |
} |
break; |
case FL_LONG: |
if (is_signed) { |
signed_val = va_arg(*ap, long); |
} else { |
unsigned_val = va_arg(*ap, unsigned long); |
} |
break; |
default: |
if (is_signed) { |
signed_val = va_arg(*ap, int); |
} else { |
unsigned_val = va_arg(*ap, unsigned int); |
} |
break; |
} |
if (is_signed) { |
if (signed_val < 0) { |
*s++ = '-'; |
signed_val = -signed_val; |
} else if (flags & FL_SIGN) *s++ = '+'; |
else if (flags & FL_SPACE) *s++ = ' '; |
unsigned_val = signed_val; |
} |
if ((flags & FL_ALT) && (c == 'o')) *s++ = '0'; |
if (!unsigned_val && c != 'p') { |
if (!precision) |
return s; |
} else if (((flags & FL_ALT) && (c == 'x' || c == 'X')) |
|| c == 'p') { |
*s++ = '0'; |
*s++ = (c == 'X' ? 'X' : 'x'); |
} |
switch (c) { |
case 'b': base = 2; break; |
case 'o': base = 8; break; |
case 'd': |
case 'i': |
case 'u': base = 10; break; |
case 'x': |
case 'X': |
case 'p': base = 16; break; |
} |
s = _i_compute(unsigned_val, base, s, precision); |
if (c == 'X') |
while (old_s != s) { |
*old_s = toupper(*old_s); |
old_s++; |
} |
return s; |
} |
/* _doprnt - core printf engine: interpret `fmt` with arguments `ap`,
 * writing characters to `stream` via the local putc() macro.
 * Returns the number of characters emitted, or a negative count if
 * putc() hit EOF (buffer full).  Used by vsnprintf()/vsprintf().
 */
int
_doprnt(register const char *fmt, va_list ap, FILE *stream)
{
	register char *s;
	register int j;
	int i, c, width, precision, zfill, flags, between_fill;
	int nrchars=0;		/* characters written so far */
	const char *oldfmt;
	char *s1, buf[1025];	/* conversion staging buffer */

	while (c = *fmt++) {
		if (c != '%') {
#ifdef CPM
			/* CP/M wants \r\n line endings */
			if (c == '\n') {
				if (putc('\r', stream) == EOF)
					return nrchars ? -nrchars : -1;
				nrchars++;
			}
#endif
			if (putc(c, stream) == EOF)
				return nrchars ? -nrchars : -1;
			nrchars++;
			continue;
		}
		/* collect the flag characters following '%' */
		flags = 0;
		do {
			switch(*fmt) {
			case '-': flags |= FL_LJUST; break;
			case '+': flags |= FL_SIGN; break;
			case ' ': flags |= FL_SPACE; break;
			case '#': flags |= FL_ALT; break;
			case '0': flags |= FL_ZEROFILL; break;
			default: flags |= FL_NOMORE; continue;
			}
			fmt++;
		} while(!(flags & FL_NOMORE));
		/* optional field width (digits or '*') */
		oldfmt = fmt;
		fmt = gnum(fmt, &width, &ap);
		if (fmt != oldfmt) flags |= FL_WIDTHSPEC;
		/* optional '.' precision */
		if (*fmt == '.') {
			fmt++; oldfmt = fmt;
			fmt = gnum(fmt, &precision, &ap);
			if (precision >= 0) flags |= FL_PRECSPEC;
		}
		/* a negative width means left justification */
		if ((flags & FL_WIDTHSPEC) && width < 0) {
			width = -width;
			flags |= FL_LJUST;
		}
		if (!(flags & FL_WIDTHSPEC)) width = 0;
		if (flags & FL_SIGN) flags &= ~FL_SPACE;
		if (flags & FL_LJUST) flags &= ~FL_ZEROFILL;
		s = s1 = buf;
		/* length modifier */
		switch (*fmt) {
		case 'h': flags |= FL_SHORT; fmt++; break;
		case 'l': flags |= FL_LONG; fmt++; break;
		case 'L': flags |= FL_LONGDOUBLE; fmt++; break;
		}
		/* conversion character: fill buf (s1..s) with the text */
		switch (c = *fmt++) {
		default:
			/* unknown conversion: echo the character itself */
#ifdef CPM
			if (c == '\n') {
				if (putc('\r', stream) == EOF)
					return nrchars ? -nrchars : -1;
				nrchars++;
			}
#endif
			if (putc(c, stream) == EOF)
				return nrchars ? -nrchars : -1;
			nrchars++;
			continue;
		case 'n':
			/* store the running count through the pointer arg */
			if (flags & FL_SHORT)
				*va_arg(ap, short *) = (short) nrchars;
			else if (flags & FL_LONG)
				*va_arg(ap, long *) = (long) nrchars;
			else
				*va_arg(ap, int *) = (int) nrchars;
			continue;
		case 's':
			s1 = va_arg(ap, char *);
			if (s1 == NULL)
				s1 = "(null)";
			s = s1;
			/* advance s to min(strlen, precision) chars */
			while (precision || !(flags & FL_PRECSPEC)) {
				if (*s == '\0')
					break;
				s++;
				precision--;
			}
			break;
		case 'p':
			set_pointer(flags);
			/* fallthrough */
		case 'b':
		case 'o':
		case 'u':
		case 'x':
		case 'X':
			if (!(flags & FL_PRECSPEC)) precision = 1;
			else if (c != 'p') flags &= ~FL_ZEROFILL;
			s = o_print(&ap, flags, s, c, precision, 0);
			break;
		case 'd':
		case 'i':
			flags |= FL_SIGNEDCONV;
			if (!(flags & FL_PRECSPEC)) precision = 1;
			else flags &= ~FL_ZEROFILL;
			s = o_print(&ap, flags, s, c, precision, 1);
			break;
		case 'c':
			*s++ = va_arg(ap, int);
			break;
#ifndef NOFLOAT
		case 'G':
		case 'g':
			if ((flags & FL_PRECSPEC) && (precision == 0))
				precision = 1;
			/* falls through to shared float handling */
		case 'f':
		case 'E':
		case 'e':
			if (!(flags & FL_PRECSPEC))
				precision = 6;
			if (precision >= sizeof(buf))
				precision = sizeof(buf) - 1;
			flags |= FL_SIGNEDCONV;
			s = _f_print(&ap, flags, s, c, precision);
			break;
#endif	/* NOFLOAT */
		case 'r':
			/* non-standard: recurse into another (va_list, fmt)
			 * pair passed as arguments */
			ap = va_arg(ap, va_list);
			fmt = va_arg(ap, char *);
			continue;
		}
		/* buf[s1..s) now holds the converted text; pad and emit */
		zfill = ' ';
		if (flags & FL_ZEROFILL) zfill = '0';
		j = s - s1;	/* number of converted characters */

		/* between_fill is true under the following conditions:
		 * 1- the fill character is '0'
		 * and
		 * 2a- the number is of the form 0x... or 0X...
		 * or
		 * 2b- the number contains a sign or space
		 * (the prefix is emitted first, zeros go after it)
		 */
		between_fill = 0;
		if ((flags & FL_ZEROFILL)
		    && (((c == 'x' || c == 'X') && (flags & FL_ALT) && j > 1)
			|| (c == 'p')
			|| ((flags & FL_SIGNEDCONV)
			    && ( *s1 == '+' || *s1 == '-' || *s1 == ' '))))
			between_fill++;

		if ((i = width - j) > 0)
			if (!(flags & FL_LJUST)) {	/* right justify */
				nrchars += i;
				if (between_fill) {
					/* emit sign / "0x" prefix before
					 * the zero padding */
					if (flags & FL_SIGNEDCONV) {
						j--; nrchars++;
						if (putc(*s1++, stream) == EOF)
							return nrchars ? -nrchars : -1;
					} else {
						j -= 2; nrchars += 2;
						if ((putc(*s1++, stream) == EOF)
						    || (putc(*s1++, stream) == EOF))
							return nrchars ? -nrchars : -1;
					}
				}
				do {
					if (putc(zfill, stream) == EOF)
						return nrchars ? -nrchars : -1;
				} while (--i);
			}
		/* the converted text itself */
		nrchars += j;
		while (--j >= 0) {
			if (putc(*s1++, stream) == EOF)
				return nrchars ? -nrchars : -1;
		}
		/* trailing fill for left-justified fields */
		if (i > 0) nrchars += i;
		while (--i >= 0)
			if (putc(zfill, stream) == EOF)
				return nrchars ? -nrchars : -1;
	}
	return nrchars;
}
/drivers/ddk/stdio/icompute.c |
---|
0,0 → 1,21 |
/* |
* icompute.c - compute an integer |
*/ |
/* $Header$ */ |
#include "loc_incl.h" |
/* This routine is used in doprnt.c as well as in tmpfile.c and tmpnam.c. */ |
/* Render `val` in the given base into the buffer at `s`, left-padding
 * with '0' digits up to `nrdigits` positions.  Digits a-f are produced
 * in lower case.  Returns the position just past the last digit (the
 * result is NOT NUL-terminated). */
char *
_i_compute(unsigned long val, int base, char *s, int nrdigits)
{
	unsigned long v;
	char *end, *p;
	int len, d;

	/* first pass: how many digit positions are needed? */
	len = 0;
	v = val;
	do {
		len++;
		v /= base;
	} while (v);
	if (len < nrdigits)
		len = nrdigits;	/* honor the minimum digit count */

	/* second pass: fill the field from the least significant end;
	 * once v reaches zero the remaining positions become '0' pads */
	end = s + len;
	p = end;
	v = val;
	while (p > s) {
		d = (int)(v % base);
		*--p = (char)(d > 9 ? d - 10 + 'a' : d + '0');
		v /= base;
	}
	return end;
}
/drivers/ddk/stdio/loc_incl.h |
---|
0,0 → 1,37 |
/* |
* loc_incl.h - local include file for stdio library |
*/ |
/* $Header$ */ |
#include "stdio.h" |
#define io_testflag(p,x) ((p)->_flags & (x)) |
#include <stdarg.h> |
#ifdef _ANSI |
int _doprnt(const char *format, va_list ap, FILE *stream); |
int _doscan(FILE * stream, const char *format, va_list ap); |
char *_i_compute(unsigned long val, int base, char *s, int nrdigits); |
char *_f_print(va_list *ap, int flags, char *s, char c, int precision); |
void __cleanup(void); |
#ifndef NOFLOAT |
char *_ecvt(long double value, int ndigit, int *decpt, int *sign); |
char *_fcvt(long double value, int ndigit, int *decpt, int *sign); |
#endif /* NOFLOAT */ |
#endif |
#define FL_LJUST 0x0001 /* left-justify field */ |
#define FL_SIGN 0x0002 /* sign in signed conversions */ |
#define FL_SPACE 0x0004 /* space in signed conversions */ |
#define FL_ALT 0x0008 /* alternate form */ |
#define FL_ZEROFILL 0x0010 /* fill with zero's */ |
#define FL_SHORT 0x0020 /* optional h */ |
#define FL_LONG 0x0040 /* optional l */ |
#define FL_LONGDOUBLE 0x0080 /* optional L */ |
#define FL_WIDTHSPEC 0x0100 /* field width is specified */ |
#define FL_PRECSPEC 0x0200 /* precision is specified */ |
#define FL_SIGNEDCONV 0x0400 /* may contain a sign */ |
#define FL_NOASSIGN 0x0800 /* do not assign (in scanf) */ |
#define FL_NOMORE 0x1000 /* all flags collected */ |
/drivers/ddk/stdio/stdio.h |
---|
0,0 → 1,46 |
/* |
* stdio.h - input/output definitions |
* |
* (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands. |
* See the copyright notice in the ACK home directory, in the file "Copyright". |
*/ |
/* $Header$ */ |
#ifndef _STDIO_H |
#define _STDIO_H |
/* |
* Focus point of all stdio activity. |
*/ |
typedef struct __iobuf {
	int _count;		/* bytes remaining in the buffer; putc()
				 * decrements it and yields EOF at 0 */
	int _fd;		/* underlying descriptor (-1 when the
				 * stream is memory-only, as in vsnprintf) */
	int _flags;		/* combination of the _IO* bits below */
	int _bufsiz;		/* allocated size of _buf */
	unsigned char *_buf;	/* start of the buffer */
	unsigned char *_ptr;	/* next position to read/write in _buf */
} FILE;
/* _flags bits */
#define _IOFBF 0x000
#define _IOREAD 0x001
#define _IOWRITE 0x002
#define _IONBF 0x004
#define _IOMYBUF 0x008
#define _IOEOF 0x010
#define _IOERR 0x020
#define _IOLBF 0x040
#define _IOREADING 0x080
#define _IOWRITING 0x100
#define _IOAPPEND 0x200
#define _IOFIFO 0x400
/* The following definitions are also in <unistd.h>. They should not
 * conflict.
 */
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
#define EOF (-1)
#endif /* _STDIO_H */ |
/drivers/ddk/stdio/vsprintf.c |
---|
0,0 → 1,37 |
/*
 * vsprintf - print formatted output without ellipsis on an array
 */
/* $Header$ */
#include "stdio.h"
#include <stdarg.h>
#include <limits.h>
#include "loc_incl.h"

/* Local putc(): store c in the fake stream's buffer if room remains,
 * else yield EOF.  Only _count and _ptr are touched — no real I/O. */
#define putc(c, p) (--(p)->_count >= 0 ? \
		(int) (*(p)->_ptr++ = (c)) : EOF)
int |
vsnprintf(char *s, unsigned n, const char *format, va_list arg) |
{ |
int retval; |
FILE tmp_stream; |
tmp_stream._fd = -1; |
tmp_stream._flags = _IOWRITE + _IONBF + _IOWRITING; |
tmp_stream._buf = (unsigned char *) s; |
tmp_stream._ptr = (unsigned char *) s; |
tmp_stream._count = n-1; |
retval = _doprnt(format, arg, &tmp_stream); |
tmp_stream._count = 1; |
putc('\0',&tmp_stream); |
return retval; |
} |
/*
 * vsprintf - format into the array s with no explicit length limit.
 * Implemented as vsnprintf() with the largest limit we can express.
 */
int
vsprintf(char *s, const char *format, va_list arg)
{
	int written;

	written = vsnprintf(s, INT_MAX, format, arg);
	return written;
}
/drivers/ddk/string/_memmove.S |
---|
0,0 → 1,67 |
# _memmove()					Author: Kees J. Bot 2 Jan 1994
# void *_memmove(void *s1, const void *s2, size_t n)
# Copy a chunk of memory.  Handle overlap.
#
# cdecl: all three arguments on the stack.  __memcpy is a second entry
# point used by memcpy() when overlap need not be handled.
.intel_syntax
.globl	__memmove, __memcpy
.text
.align	16
__memmove:
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	mov	edi, [ebp+8]		# String s1 (destination)
	mov	esi, [ebp+12]		# String s2 (source)
	mov	ecx, [ebp+16]		# Length
	mov	eax, edi
	sub	eax, esi
	cmp	eax, ecx		# Unsigned (s1 - s2) < n only when s1 lands
	jb	downwards		# inside [s2, s2+n): then copy downwards
__memcpy:
	cld				# Clear direction bit: upwards
	cmp	ecx, 16
	jb	upbyte			# Don't bother being smart with short arrays
	mov	eax, esi
	or	eax, edi
	testb	al, 1			# Check combined alignment of both pointers
	jnz	upbyte			# Bit 0 set, use byte copy
	testb	al, 2
	jnz	upword			# Bit 1 set, use word copy
uplword:
	shrd	eax, ecx, 2		# Save low 2 bits of ecx in eax
	shr	ecx, 2
	rep	movsd			# Copy longwords.
	shld	ecx, eax, 2		# Restore excess count
upword:
	shr	ecx, 1
	rep	movsw			# Copy words
	adc	ecx, ecx		# One more byte?
upbyte:
	rep	movsb			# Copy bytes
done:
	mov	eax, [ebp+8]		# Return s1, as memmove()/memcpy() must
	pop	edi
	pop	esi
	pop	ebp
	ret
# Handle bad overlap by copying downwards, don't bother to do word copies.
downwards:
	std				# Set direction bit: downwards
	lea	esi, [esi+ecx-1]	# Start at the last byte of each region
	lea	edi, [edi+ecx-1]
	rep	movsb			# Copy bytes
	cld				# Leave direction flag clear for the caller
	jmp	done
/drivers/ddk/string/_strncat.S |
---|
0,0 → 1,43 |
# _strncat()	Author: Kees J. Bot
#	1 Jan 1994
# char *_strncat(char *s1, const char *s2, size_t edx)
# Append string s2 to s1.
#
# Internal common code for strcat()/strncat(): the maximum count arrives
# in register edx, the two string pointers on the stack.
.intel_syntax
.global	__strncat
.text
.align	16
__strncat:
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	mov	edi, [ebp+8]		# String s1
	mov	ecx, -1			# Unlimited scan
	xorb	al, al			# Null byte
	cld
	repne
	scasb				# Look for the zero byte in s1
	dec	edi			# Back one up (and clear 'Z' flag)
	push	edi			# Save end of s1
	mov	edi, [12+ebp]		# edi = string s2
	mov	ecx, edx		# Maximum count
	repne
	scasb				# Look for the end of s2
	jne	no0			# Count ran out before a null was found
	inc	ecx			# Exclude null byte
no0:	sub	edx, ecx		# Number of bytes in s2
	mov	ecx, edx
	mov	esi, [12+ebp]		# esi = string s2
	pop	edi			# edi = end of string s1
	rep
	movsb				# Copy bytes
	stosb				# Add a terminating null (al is still 0)
	mov	eax, [8+ebp]		# Return s1
	pop	edi
	pop	esi
	pop	ebp
	ret
/drivers/ddk/string/_strncmp.S |
---|
0,0 → 1,44 |
# strncmp()	Author: Kees J. Bot 1 Jan 1994
# int strncmp(const char *s1, const char *s2, size_t ecx)
# Compare two strings.
#
# Internal common code: the maximum count arrives in register ecx, the
# two string pointers on the stack.
.intel_syntax
.globl	__strncmp
.text
.align	16
__strncmp:
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	test	ecx, ecx		# Max length is zero?
	je	done			# Yes: 'Z' set, so the result below is 0
	mov	esi, [ebp+8]		# esi = string s1
	mov	edi, [ebp+12]		# edi = string s2
	cld
compare:
	cmpsb				# Compare two bytes
	jne	done
	cmpb	[esi-1], 0		# End of string?
	je	done
	dec	ecx			# Length limit reached?
	jne	compare
done:					# Flags of the last compare decide the sign
	seta	al			# al = (s1 > s2)
	setb	ah			# ah = (s1 < s2)
	subb	al, ah
	movsx	eax, al			# eax = (s1 > s2) - (s1 < s2), i.e. -1, 0, 1
	pop	edi
	pop	esi
	pop	ebp
	ret
/drivers/ddk/string/_strncpy.S |
---|
0,0 → 1,27 |
# _strncpy()	Author: Kees J. Bot
#	1 Jan 1994
# char *_strncpy(char *s1, const char *s2, size_t ecx)
# Copy string s2 to s1.
#
# Internal common code for strcpy()/strncpy().  NOTE: this routine builds
# no stack frame of its own — it is *called* by a wrapper that already set
# up ebp and saved esi/edi, so [ebp+8]/[ebp+12] index the wrapper's own
# arguments.  On return edx holds the count left over (bytes NOT copied),
# which strncpy() uses for its null padding.
.intel_syntax
.text
.globl	__strncpy
.align	16
__strncpy:
	mov	edi, [ebp+12]		# edi = string s2
	xorb	al, al			# Look for a zero byte
	mov	edx, ecx		# Save maximum count
	cld
	repne
	scasb				# Look for end of s2
	sub	edx, ecx		# Number of bytes in s2 including null
	xchg	ecx, edx		# ecx = bytes to copy, edx = count left
	mov	esi, [ebp+12]		# esi = string s2
	mov	edi, [ebp+8]		# edi = string s1
	rep
	movsb				# Copy bytes
	ret
/drivers/ddk/string/_strnlen.S |
---|
0,0 → 1,30 |
# _strnlen()	Author: Kees J. Bot 1 Jan 1994
# size_t _strnlen(const char *s, size_t ecx)
# Return the length of a string.
#
# Internal common code: the maximum count arrives in register ecx, the
# string pointer on the stack.
.intel_syntax
.globl	__strnlen
.text
.align	16
__strnlen:
	push	ebp
	mov	ebp, esp
	push	edi
	mov	edi, [ebp+8]		# edi = string
	xorb	al, al			# Look for a zero byte
	mov	edx, ecx		# Save maximum count
	cmpb	cl, 1			# 'Z' bit must be clear if ecx = 0
	cld				# (repne with ecx=0 leaves flags alone)
	repne
	scasb				# Look for zero
	jne	no0			# Count ran out before a null was seen
	inc	ecx			# Don't count zero byte
no0:
	mov	eax, edx
	sub	eax, ecx		# Compute bytes scanned
	pop	edi
	pop	ebp
	ret
/drivers/ddk/string/memcmp.S |
---|
0,0 → 1,59 |
# memcmp()	Author: Kees J. Bot
#	2 Jan 1994
# int memcmp(const void *s1, const void *s2, size_t n)
# Compare two chunks of memory.
#
# Compares by longword/word when alignment allows, then steps back one
# unit and recompares it (plus any excess) byte by byte so the sign of
# the result is decided on the exact differing byte.
.intel_syntax
.globl	_memcmp
.text
.align	16
_memcmp:
	cld
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	mov	esi, [8+ebp]		# String s1
	mov	edi, [12+ebp]		# String s2
	mov	ecx, [16+ebp]		# Length
	cmp	ecx, 16
	jb	cbyte			# Don't bother being smart with short arrays
	mov	eax, esi
	or	eax, edi		# Combined alignment of both pointers
	testb	al, 1
	jnz	cbyte			# Bit 0 set, use byte compare
	testb	al, 2
	jnz	cword			# Bit 1 set, use word compare
clword:	shrd	eax, ecx, 2		# Save low two bits of ecx in eax
	shr	ecx, 2
	repe
	cmpsd				# Compare longwords
	sub	esi, 4			# Step back over the last longword
	sub	edi, 4
	inc	ecx			# Recompare the last longword
	shld	ecx, eax, 2		# And any excess bytes
	jmp	last
cword:	shrd	eax, ecx, 1		# Save low bit of ecx in eax
	shr	ecx, 1
	repe
	cmpsw				# Compare words
	sub	esi, 2			# Step back over the last word
	sub	edi, 2
	inc	ecx			# Recompare the last word
	shld	ecx, eax, 1		# And one more byte?
cbyte:	test	ecx, ecx		# Set 'Z' flag if ecx = 0
last:	repe
	cmpsb				# Look for the first differing byte
	seta	al			# al = (s1 > s2)
	setb	ah			# ah = (s1 < s2)
	subb	al, ah
	movsxb	eax, al			# eax = (s1 > s2) - (s1 < s2), i.e. -1, 0, 1
	mov	edx, esi		# For bcmp() to play with
	pop	edi
	pop	esi
	pop	ebp
	ret
/drivers/ddk/string/memcpy.S |
---|
0,0 → 1,26 |
# memcpy()	Author: Kees J. Bot 2 Jan 1994
# void *memcpy(void *s1, const void *s2, size_t n)
# Copy a chunk of memory.
# This routine need not handle overlap, so it does not handle overlap.
# One could simply call __memmove, the cost of the overlap check is
# negligible, but you are dealing with a programmer who believes that
# if anything can go wrong, it should go wrong.
.intel_syntax
.globl	_memcpy
.text
.align	16
_memcpy:
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	mov	edi, [ebp+8]		# String s1
	mov	esi, [ebp+12]		# String s2
	mov	ecx, [ebp+16]		# Length
	# No overlap check here
	jmp	__memcpy		# Tail into _memmove.S's upward copy,
					# which restores regs and returns s1.
/drivers/ddk/string/memset.S |
---|
0,0 → 1,47 |
# memset()	Author: Kees J. Bot
#	2 Jan 1994
# void *memset(void *s, int c, size_t n)
# Set a chunk of memory to the same byte value.
#
# Widens the fill byte to a word/longword and uses string stores when the
# destination alignment allows it.
.intel_syntax
.global	_memset
.text
.align	16
_memset:
	push	ebp
	mov	ebp, esp
	push	edi
	mov	edi, [8+ebp]		# The string
	movzx	eax, byte ptr [12+ebp]	# The fill byte
	mov	ecx, [16+ebp]		# Length
	cld
	cmp	ecx, 16
	jb	sbyte			# Don't bother being smart with short arrays
	test	edi, 1
	jnz	sbyte			# Bit 0 set, use byte store
	test	edi, 2
	jnz	sword			# Bit 1 set, use word store
slword:
	movb	ah, al
	mov	edx, eax
	sal	edx, 16
	or	eax, edx		# One byte to four bytes
	shrd	edx, ecx, 2		# Save low two bits of ecx in edx
	shr	ecx, 2
	rep	stosd			# Store longwords.
	shld	ecx, edx, 2		# Restore low two bits
sword:
	movb	ah, al			# One byte to two bytes
	shr	ecx, 1
	rep	stosw			# Store words
	adc	ecx, ecx		# One more byte?
sbyte:
	rep	stosb			# Store bytes
done:
	mov	eax, [8+ebp]		# Return s, as memset() must
	pop	edi
	pop	ebp
	ret
/drivers/ddk/string/strcat.S |
---|
0,0 → 1,15 |
# strcat()	Author: Kees J. Bot
#	1 Jan 1994
# char *strcat(char *s1, const char *s2)
# Append string s2 to s1.
#
.intel_syntax
.global	_strcat
.text
.align	16
_strcat:
	mov	edx, -1			# Unlimited length
	jmp	__strncat		# Common code (tail call; stack args reused)
/drivers/ddk/string/strchr.S |
---|
0,0 → 1,46 |
# strchr()	Author: Kees J. Bot 1 Jan 1994
# char *strchr(const char *s, int c)
# Look for a character in a string.
#
# Scans in chunks of doubling size: first locate the terminating null (or
# chunk end), then rescan the same chunk for c.  The pushf/popf pair
# carries the "null seen?" answer across the second scan.
.intel_syntax
.globl	_strchr
.text
.align	16
_strchr:
	push	ebp
	mov	ebp, esp
	push	edi
	cld
	mov	edi, [ebp+8]		# edi = string
	mov	edx, 16			# Look at small chunks of the string
next:
	shl	edx, 1			# Chunks become bigger each time
	mov	ecx, edx
	xorb	al, al			# Look for the zero at the end
	repne	scasb
	pushf				# Remember the flags
	sub	ecx, edx
	neg	ecx			# Some or all of the chunk
	sub	edi, ecx		# Step back
	movb	al, [ebp+12]		# The character to look for
	repne	scasb
	je	found
	popf				# Did we find the end of string earlier?
	jne	next			# No, try again
	xor	eax, eax		# Return NULL
	pop	edi
	pop	ebp
	ret
found:
	pop	eax			# Get rid of those flags
	lea	eax, [edi-1]		# Address of byte found
	pop	edi
	pop	ebp
	ret
/drivers/ddk/string/strcpy.S |
---|
0,0 → 1,24 |
# strcpy()	Author: Kees J. Bot
#	1 Jan 1994
# char *strcpy(char *s1, const char *s2)
# Copy string s2 to s1.
#
.intel_syntax
.global	_strcpy
.text
.align	16
_strcpy:
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	mov	ecx, -1			# Unlimited length
	call	__strncpy		# Common code: takes the max count in ecx
					# and reads s1/s2 via THIS frame's ebp.
					# (Was `call _strncpy', which re-entered
					# the public strncpy with a frame that
					# holds no arguments — [ebp+16] garbage.)
	mov	eax, [8+ebp]		# Return s1
	pop	edi
	pop	esi
	pop	ebp
	ret
/drivers/ddk/string/strlen.S |
---|
0,0 → 1,15 |
# strlen()	Author: Kees J. Bot 1 Jan 1994
# size_t strlen(const char *s)
# Return the length of a string.
.intel_syntax
.globl	_strlen
.text
.align	16
_strlen:
	mov	ecx, -1			# Unlimited length
	jmp	__strnlen		# Common code (tail call; stack arg reused)
/drivers/ddk/string/strncmp.S |
---|
0,0 → 1,15 |
# strncmp()	Author: Kees J. Bot 1 Jan 1994
# int strncmp(const char *s1, const char *s2, size_t n)
# Compare two strings.
#
.intel_syntax
.globl	_strncmp
.text
.align	16
_strncmp:
	mov	ecx, [esp+12]		# Maximum length (3rd stack argument;
					# ret address still at [esp])
	jmp	__strncmp		# Common code (tail call; s1/s2 reused)
/drivers/ddk/string/strncpy.S |
---|
0,0 → 1,28 |
# strncpy()	Author: Kees J. Bot
#	1 Jan 1994
# char *strncpy(char *s1, const char *s2, size_t n)
# Copy string s2 to s1.
#
.intel_syntax
.text
.globl	_strncpy
.align	16
_strncpy:
	push	ebp
	mov	ebp, esp
	push	esi
	push	edi
	mov	ecx, [ebp+16]		# Maximum length
	call	__strncpy		# Common code (reads s1/s2 via this ebp;
					# returns leftover count in edx, al = 0)
	mov	ecx, edx		# Number of bytes not copied
	rep
	stosb				# strncpy always copies n bytes by null padding
	mov	eax, [ebp+8]		# Return s1
	pop	edi
	pop	esi
	pop	ebp
	ret
/drivers/include/types.h |
---|
File deleted |
/drivers/include/pci.h |
---|
File deleted |
/drivers/include/drm/drm.h |
---|
0,0 → 1,778 |
/** |
* \file drm.h |
* Header for the Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* |
* \par Acknowledgments: |
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_H_ |
#define _DRM_H_ |
#include <linux/types.h> |
#include <errno-base.h> |
typedef unsigned int drm_handle_t; |
//#include <asm/ioctl.h> /* For _IO* macros */ |
#define DRM_MAJOR 226 |
#define DRM_MAX_MINOR 15 |
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ |
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ |
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ |
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ |
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ |
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ |
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) |
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) |
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) |
typedef unsigned int drm_context_t;	/**< opaque rendering-context handle */
typedef unsigned int drm_drawable_t;	/**< opaque drawable handle */
typedef unsigned int drm_magic_t;	/**< auth cookie (see struct drm_auth) */
/** |
* Cliprect. |
* |
* \warning: If you change this structure, make sure you change |
* XF86DRIClipRectRec in the server as well |
* |
* \note KW: Actually it's illegal to change either for |
* backwards-compatibility reasons. |
*/ |
struct drm_clip_rect { |
unsigned short x1; |
unsigned short y1; |
unsigned short x2; |
unsigned short y2; |
}; |
/** |
* Drawable information. |
*/ |
struct drm_drawable_info { |
unsigned int num_rects; |
struct drm_clip_rect *rects; |
}; |
/** |
 * Texture region.
*/ |
struct drm_tex_region { |
unsigned char next; |
unsigned char prev; |
unsigned char in_use; |
unsigned char padding; |
unsigned int age; |
}; |
/** |
* Hardware lock. |
* |
* The lock structure is a simple cache-line aligned integer. To avoid |
* processor bus contention on a multiprocessor system, there should not be any |
* other data stored in the same cache line. |
*/ |
struct drm_hw_lock { |
__volatile__ unsigned int lock; /**< lock variable */ |
char padding[60]; /**< Pad to cache line */ |
}; |
/** |
* DRM_IOCTL_VERSION ioctl argument type. |
* |
* \sa drmGetVersion(). |
*/ |
struct drm_version { |
int version_major; /**< Major version */ |
int version_minor; /**< Minor version */ |
int version_patchlevel; /**< Patch level */ |
size_t name_len; /**< Length of name buffer */ |
char __user *name; /**< Name of driver */ |
size_t date_len; /**< Length of date buffer */ |
char __user *date; /**< User-space buffer to hold date */ |
size_t desc_len; /**< Length of desc buffer */ |
char __user *desc; /**< User-space buffer to hold desc */ |
}; |
/** |
* DRM_IOCTL_GET_UNIQUE ioctl argument type. |
* |
* \sa drmGetBusid() and drmSetBusId(). |
*/ |
struct drm_unique { |
size_t unique_len; /**< Length of unique */ |
char __user *unique; /**< Unique name for driver instantiation */ |
}; |
/* User-supplied array to be filled with drm_version records.
 * NOTE(review): the ioctl that takes this is not visible in this chunk. */
struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

/* Placeholder argument type for DRM_IOCTL_BLOCK/UNBLOCK (see the
 * commented-out ioctl table below); carries no data. */
struct drm_block {
	int unused;
};
/** |
* DRM_IOCTL_CONTROL ioctl argument type. |
* |
* \sa drmCtlInstHandler() and drmCtlUninstHandler(). |
*/ |
struct drm_control { |
enum { |
DRM_ADD_COMMAND, |
DRM_RM_COMMAND, |
DRM_INST_HANDLER, |
DRM_UNINST_HANDLER |
} func; |
int irq; |
}; |
/** |
* Type of memory to map. |
*/ |
enum drm_map_type { |
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ |
_DRM_REGISTERS = 1, /**< no caching, no core dump */ |
_DRM_SHM = 2, /**< shared, cached */ |
_DRM_AGP = 3, /**< AGP/GART */ |
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ |
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ |
_DRM_GEM = 6, /**< GEM object */ |
}; |
/** |
* Memory mapping flags. |
*/ |
enum drm_map_flags { |
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ |
_DRM_READ_ONLY = 0x02, |
_DRM_LOCKED = 0x04, /**< shared, cached, locked */ |
_DRM_KERNEL = 0x08, /**< kernel requires access */ |
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ |
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ |
_DRM_REMOVABLE = 0x40, /**< Removable mapping */ |
_DRM_DRIVER = 0x80 /**< Managed by driver */ |
}; |
struct drm_ctx_priv_map { |
unsigned int ctx_id; /**< Context requesting private mapping */ |
void *handle; /**< Handle of map */ |
}; |
/** |
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls |
* argument type. |
* |
* \sa drmAddMap(). |
*/ |
struct drm_map { |
unsigned long offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
/* Private data */ |
}; |
/** |
* DRM_IOCTL_GET_CLIENT ioctl argument type. |
*/ |
struct drm_client { |
int idx; /**< Which client desired? */ |
int auth; /**< Is client authenticated? */ |
unsigned long pid; /**< Process ID */ |
unsigned long uid; /**< User ID */ |
unsigned long magic; /**< Magic */ |
unsigned long iocs; /**< Ioctl count */ |
}; |
enum drm_stat_type { |
_DRM_STAT_LOCK, |
_DRM_STAT_OPENS, |
_DRM_STAT_CLOSES, |
_DRM_STAT_IOCTLS, |
_DRM_STAT_LOCKS, |
_DRM_STAT_UNLOCKS, |
_DRM_STAT_VALUE, /**< Generic value */ |
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ |
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ |
_DRM_STAT_IRQ, /**< IRQ */ |
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */ |
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ |
_DRM_STAT_DMA, /**< DMA */ |
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ |
_DRM_STAT_MISSED /**< Missed DMA opportunity */ |
/* Add to the *END* of the list */ |
}; |
/** |
* DRM_IOCTL_GET_STATS ioctl argument type. |
*/ |
struct drm_stats { |
unsigned long count; |
struct { |
unsigned long value; |
enum drm_stat_type type; |
} data[15]; |
}; |
/** |
* Hardware locking flags. |
*/ |
enum drm_lock_flags { |
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ |
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ |
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ |
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ |
/* These *HALT* flags aren't supported yet |
-- they will be used to support the |
full-screen DGA-like mode. */ |
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ |
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ |
}; |
/** |
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. |
* |
* \sa drmGetLock() and drmUnlock(). |
*/ |
struct drm_lock { |
int context; |
enum drm_lock_flags flags; |
}; |
/** |
* DMA flags |
* |
* \warning |
* These values \e must match xf86drm.h. |
* |
* \sa drm_dma. |
*/ |
enum drm_dma_flags { |
/* Flags for DMA buffer dispatch */ |
_DRM_DMA_BLOCK = 0x01, /**< |
* Block until buffer dispatched. |
* |
* \note The buffer may not yet have |
* been processed by the hardware -- |
* getting a hardware lock with the |
* hardware quiescent will ensure |
* that the buffer has been |
* processed. |
*/ |
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ |
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ |
/* Flags for DMA buffer request */ |
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ |
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ |
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ |
}; |
/** |
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. |
* |
* \sa drmAddBufs(). |
*/ |
struct drm_buf_desc { |
int count; /**< Number of buffers of this size */ |
int size; /**< Size in bytes */ |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
enum { |
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ |
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ |
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ |
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ |
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ |
} flags; |
unsigned long agp_start; /**< |
* Start address of where the AGP buffers are |
* in the AGP aperture |
*/ |
}; |
/** |
* DRM_IOCTL_INFO_BUFS ioctl argument type. |
*/ |
struct drm_buf_info { |
int count; /**< Entries in list */ |
struct drm_buf_desc __user *list; |
}; |
/** |
* DRM_IOCTL_FREE_BUFS ioctl argument type. |
*/ |
struct drm_buf_free { |
int count; |
int __user *list; |
}; |
/** |
* Buffer information |
* |
* \sa drm_buf_map. |
*/ |
struct drm_buf_pub { |
int idx; /**< Index into the master buffer list */ |
int total; /**< Buffer size */ |
int used; /**< Amount of buffer in use (for DMA) */ |
void __user *address; /**< Address of buffer */ |
}; |
/** |
* DRM_IOCTL_MAP_BUFS ioctl argument type. |
*/ |
struct drm_buf_map { |
int count; /**< Length of the buffer list */ |
void __user *virtual; /**< Mmap'd area in user-virtual */ |
struct drm_buf_pub __user *list; /**< Buffer information */ |
}; |
/** |
* DRM_IOCTL_DMA ioctl argument type. |
* |
* Indices here refer to the offset into the buffer list in drm_buf_get. |
* |
* \sa drmDMA(). |
*/ |
struct drm_dma { |
int context; /**< Context handle */ |
int send_count; /**< Number of buffers to send */ |
int __user *send_indices; /**< List of handles to buffers */ |
int __user *send_sizes; /**< Lengths of data to send */ |
enum drm_dma_flags flags; /**< Flags */ |
int request_count; /**< Number of buffers requested */ |
int request_size; /**< Desired size for buffers */ |
int __user *request_indices; /**< Buffer information */ |
int __user *request_sizes; |
int granted_count; /**< Number of buffers granted */ |
}; |
enum drm_ctx_flags { |
_DRM_CONTEXT_PRESERVED = 0x01, |
_DRM_CONTEXT_2DONLY = 0x02 |
}; |
/** |
* DRM_IOCTL_ADD_CTX ioctl argument type. |
* |
* \sa drmCreateContext() and drmDestroyContext(). |
*/ |
struct drm_ctx { |
drm_context_t handle; |
enum drm_ctx_flags flags; |
}; |
/** |
* DRM_IOCTL_RES_CTX ioctl argument type. |
*/ |
struct drm_ctx_res { |
int count; |
struct drm_ctx __user *contexts; |
}; |
/** |
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. |
*/ |
struct drm_draw { |
drm_drawable_t handle; |
}; |
/** |
* DRM_IOCTL_UPDATE_DRAW ioctl argument type. |
*/ |
typedef enum { |
DRM_DRAWABLE_CLIPRECTS, |
} drm_drawable_info_type_t; |
struct drm_update_draw { |
drm_drawable_t handle; |
unsigned int type; |
unsigned int num; |
unsigned long long data; |
}; |
/** |
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. |
*/ |
struct drm_auth { |
drm_magic_t magic; |
}; |
/** |
* DRM_IOCTL_IRQ_BUSID ioctl argument type. |
* |
* \sa drmGetInterruptFromBusID(). |
*/ |
struct drm_irq_busid { |
int irq; /**< IRQ number */ |
int busnum; /**< bus number */ |
int devnum; /**< device number */ |
int funcnum; /**< function number */ |
}; |
enum drm_vblank_seq_type { |
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ |
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ |
}; |
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) |
struct drm_wait_vblank_request { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
unsigned long signal; |
}; |
struct drm_wait_vblank_reply { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
long tval_sec; |
long tval_usec; |
}; |
/** |
* DRM_IOCTL_WAIT_VBLANK ioctl argument type. |
* |
* \sa drmWaitVBlank(). |
*/ |
union drm_wait_vblank { |
struct drm_wait_vblank_request request; |
struct drm_wait_vblank_reply reply; |
}; |
#define _DRM_PRE_MODESET 1 |
#define _DRM_POST_MODESET 2 |
/** |
* DRM_IOCTL_MODESET_CTL ioctl argument type |
* |
* \sa drmModesetCtl(). |
*/ |
struct drm_modeset_ctl { |
__u32 crtc; |
__u32 cmd; |
}; |
/** |
* DRM_IOCTL_AGP_ENABLE ioctl argument type. |
* |
* \sa drmAgpEnable(). |
*/ |
struct drm_agp_mode { |
unsigned long mode; /**< AGP mode */ |
}; |
/** |
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. |
* |
* \sa drmAgpAlloc() and drmAgpFree(). |
*/ |
struct drm_agp_buffer { |
unsigned long size; /**< In bytes -- will round to page boundary */ |
unsigned long handle; /**< Used for binding / unbinding */ |
unsigned long type; /**< Type of memory to allocate */ |
unsigned long physical; /**< Physical used by i810 */ |
}; |
/** |
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. |
* |
* \sa drmAgpBind() and drmAgpUnbind(). |
*/ |
struct drm_agp_binding { |
unsigned long handle; /**< From drm_agp_buffer */ |
unsigned long offset; /**< In bytes -- will round to page boundary */ |
}; |
/** |
* DRM_IOCTL_AGP_INFO ioctl argument type. |
* |
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), |
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), |
* drmAgpVendorId() and drmAgpDeviceId(). |
*/ |
struct drm_agp_info { |
int agp_version_major; |
int agp_version_minor; |
unsigned long mode; |
unsigned long aperture_base; /* physical address */ |
unsigned long aperture_size; /* bytes */ |
unsigned long memory_allowed; /* bytes */ |
unsigned long memory_used; |
/* PCI information */ |
unsigned short id_vendor; |
unsigned short id_device; |
}; |
/** |
* DRM_IOCTL_SG_ALLOC ioctl argument type. |
*/ |
struct drm_scatter_gather { |
unsigned long size; /**< In bytes -- will round to page boundary */ |
unsigned long handle; /**< Used for mapping / unmapping */ |
}; |
/** |
* DRM_IOCTL_SET_VERSION ioctl argument type. |
*/ |
struct drm_set_version { |
int drm_di_major; |
int drm_di_minor; |
int drm_dd_major; |
int drm_dd_minor; |
}; |
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */ |
struct drm_gem_close { |
/** Handle of the object to be closed. */ |
__u32 handle; |
__u32 pad; |
}; |
/** DRM_IOCTL_GEM_FLINK ioctl argument type */ |
struct drm_gem_flink { |
/** Handle for the object being named */ |
__u32 handle; |
/** Returned global name */ |
__u32 name; |
}; |
/** DRM_IOCTL_GEM_OPEN ioctl argument type */ |
struct drm_gem_open { |
/** Name of object being opened */ |
__u32 name; |
/** Returned handle for the object */ |
__u32 handle; |
/** Returned size of the object */ |
__u64 size; |
}; |
#include "drm_mode.h" |
/* |
#define DRM_IOCTL_BASE 'd' |
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) |
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) |
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) |
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) |
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) |
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) |
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) |
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) |
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) |
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) |
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) |
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) |
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) |
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) |
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) |
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) |
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) |
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) |
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) |
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) |
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) |
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) |
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) |
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) |
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) |
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) |
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) |
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) |
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) |
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) |
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) |
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) |
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) |
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) |
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) |
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) |
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) |
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) |
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) |
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) |
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) |
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) |
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) |
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) |
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) |
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) |
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) |
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) |
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) |
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) |
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) |
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) |
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) |
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) |
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) |
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) |
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc) |
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) |
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) |
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) |
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) |
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) |
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) |
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) |
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) |
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) |
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) |
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) |
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) |
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) |
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) |
*/ |
/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE 0x40 /**< first driver-private ioctl number */
#define DRM_COMMAND_END 0xA0 /**< one past the last driver-private ioctl number */
/**
 * Header for events written back to userspace on the drm fd. The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event. A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
	__u32 type;   /**< DRM_EVENT_* code; values >= 0x80000000 are chipset specific */
	__u32 length; /**< total size of this event in bytes, header included */
};
#define DRM_EVENT_VBLANK 0x01        /**< vblank event (struct drm_event_vblank) */
#define DRM_EVENT_FLIP_COMPLETE 0x02 /**< page-flip completion (struct drm_event_vblank) */
/** Payload written to userspace for DRM_EVENT_VBLANK / DRM_EVENT_FLIP_COMPLETE. */
struct drm_event_vblank {
	struct drm_event base; /**< generic header (type + length) */
	__u64 user_data;       /**< value passed in by the ioctl that triggered the event */
	__u32 tv_sec;          /**< timestamp, seconds part */
	__u32 tv_usec;         /**< timestamp, microseconds part */
	__u32 sequence;        /**< sequence counter for this event */
	__u32 reserved;        /**< padding, unused */
};
/* typedef area */
/*
 * Legacy libdrm compatibility: older user-space code referred to these
 * types by *_t typedef names.  The aliases are hidden from kernel code,
 * which uses the tagged struct/enum names directly.
 */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
/*
 * NOTE(review): mutex_lock()/mutex_unlock() expand to nothing in this
 * build — presumably the ported code paths run without preemption.
 * Confirm before relying on them for mutual exclusion.
 */
#define mutex_lock(x)
#define mutex_unlock(x)
#endif |
/drivers/include/drm/drmP.h |
---|
0,0 → 1,1604 |
/** |
* \file drmP.h |
* Private header for Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* \author Gareth Hughes <gareth@valinux.com> |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_P_H_ |
#define _DRM_P_H_ |
#ifdef __KERNEL__ |
#ifdef __alpha__ |
/* add include of current.h so that "current" is defined |
* before static inline funcs in wait.h. Doing this so we |
* can build the DRM (part of PI DRI). 4/21/2000 S + B */ |
#include <asm/current.h> |
#endif /* __alpha__ */ |
#include <linux/module.h> |
#include <linux/kernel.h> |
#include <linux/kref.h> |
#include <linux/spinlock.h> |
//#include <linux/miscdevice.h> |
//#include <linux/fs.h> |
//#include <linux/proc_fs.h> |
//#include <linux/init.h> |
//#include <linux/file.h> |
#include <linux/pci.h> |
//#include <linux/jiffies.h> |
//#include <linux/smp_lock.h> /* For (un)lock_kernel */ |
//#include <linux/dma-mapping.h> |
//#include <linux/mm.h> |
//#include <linux/cdev.h> |
//#include <linux/mutex.h> |
//#include <asm/io.h> |
//#include <asm/mman.h> |
//#include <asm/uaccess.h> |
//#include <linux/workqueue.h> |
//#include <linux/poll.h> |
//#include <asm/pgalloc.h> |
#include "drm.h" |
#include <linux/idr.h> |
/*
 * Evaluate the config selection at definition time instead of expanding
 * `defined` from the macro body: a `defined` token produced by macro
 * expansion inside #if is undefined behavior (C99 6.10.1p4), and the
 * old form was also unusable in ordinary run-time expressions.
 * The macros still expand to a constant 0/1 usable both in #if and in C
 * code, so all existing users keep working.
 */
#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
#define __OS_HAS_AGP 1
#else
#define __OS_HAS_AGP 0
#endif
#if defined(CONFIG_MTRR)
#define __OS_HAS_MTRR 1
#else
#define __OS_HAS_MTRR 0
#endif
#include <drm_edid.h> |
#include <drm_crtc.h> |
struct drm_file; |
struct drm_device; |
//#include "drm_os_linux.h" |
#include "drm_hashtab.h" |
#include "drm_mm.h" |
/* Category bits for drm_ut_debug_printk()'s request_level argument. */
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_MODE 0x08
/* Pixel clock in kHz -> period in picoseconds (used for mode timings). */
#define KHZ2PICOS(a) (1000000000UL/(a))
extern void drm_ut_debug_printk(unsigned int request_level,
				const char *prefix,
				const char *function_name,
				const char *format, ...);
/*
 * Debug printers for this build: they emit unconditionally through
 * dbgprintf()/printk() instead of routing through drm_ut_debug_printk(),
 * so no category filtering is applied.
 */
#define DRM_DEBUG_MODE(prefix, fmt, args...) \
	do { \
		dbgprintf("drm debug: %s" fmt, \
			__func__, ##args); \
	} while (0)
#define DRM_DEBUG(fmt, args...) \
	do { \
		printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
	} while(0)
/* NOTE(review): identical to DRM_DEBUG — the KMS category is not
 * distinguished in this build. */
#define DRM_DEBUG_KMS(fmt, args...) \
	do { \
		printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
	} while(0)
/* dev_* helpers: print via printk; the device argument is ignored. */
#define dev_err(dev, format, arg...) \
	printk("Error %s " format, __func__ , ## arg)
#define dev_warn(dev, format, arg...) \
	printk("Warning %s " format, __func__ , ## arg)
#define dev_info(dev, format, arg...) \
	printk("Info %s " format , __func__, ## arg)
/**
 * This structure defines the drm_mm memory object, which will be used by the
 * DRM for its buffer objects.
 *
 * NOTE(review): the shmem backing file and mapping-list members of the
 * upstream structure are commented out below — this port keeps only the
 * fields the visible code needs.
 */
struct drm_gem_object {
	/** Reference count of this object */
	struct kref refcount;
	/** Handle count of this object. Each handle also holds a reference */
	struct kref handlecount;
	/** Related drm device */
	struct drm_device *dev;
	/** File representing the shmem storage */
//	struct file *filp;
	/* Mapping info for this object */
//	struct drm_map_list map_list;
	/**
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t size;
	/**
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device
	 */
	int name;
	/**
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated
	 */
	uint32_t read_domains;
	uint32_t write_domain;
	/**
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;
	/** Opaque pointer for driver-specific per-object data */
	void *driver_private;
};
/*
 * sysfs connector hooks, stubbed out for this port: connector
 * registration is a no-op that always reports success, and removal does
 * nothing.  A forward declaration keeps `struct drm_connector` at file
 * scope instead of prototype scope, and the stray semicolons after the
 * function bodies (invalid in strict ISO C90) are dropped.
 */
struct drm_connector;

/** No-op stand-in for sysfs connector registration; always returns 0. */
static inline int drm_sysfs_connector_add(struct drm_connector *connector)
{ return 0; }

/** No-op stand-in for sysfs connector removal. */
static inline void drm_sysfs_connector_remove(struct drm_connector *connector)
{ }
#if 0
/*
 * NOTE(review): everything from here down to the matching #endif is
 * disabled with "#if 0" — upstream DRM core declarations presumably
 * retained for reference during the port.  Confirm whether this dead
 * block can be removed outright.
 */
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
/* driver capabilities and requirements mask (bits for
 * drm_driver.driver_features) */
#define DRIVER_USE_AGP 0x1
#define DRIVER_REQUIRE_AGP 0x2
#define DRIVER_USE_MTRR 0x4
#define DRIVER_PCI_DMA 0x8
#define DRIVER_SG 0x10
#define DRIVER_HAVE_DMA 0x20
#define DRIVER_HAVE_IRQ 0x40
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_IRQ_VBL 0x100
#define DRIVER_DMA_QUEUE 0x200
#define DRIVER_FB_DMA 0x400
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
/***********************************************************************/ |
/** \name Begin the DRM... */ |
/*@{*/ |
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then |
also include looping detection. */ |
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ |
#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ |
#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ |
#define DRM_LOOPING_LIMIT 5000000 |
#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ |
#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ |
#define DRM_FLAG_DEBUG 0x01 |
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) |
#define DRM_MAP_HASH_OFFSET 0x10000000 |
/*@}*/ |
/***********************************************************************/ |
/** \name Macros to make printk easier */ |
/*@{*/ |
/** |
* Error output. |
* |
* \param fmt printf() like format string. |
* \param arg arguments |
*/ |
#define DRM_ERROR(fmt, arg...) \ |
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) |
/** |
* Memory error output. |
* |
* \param area memory area where the error occurred. |
* \param fmt printf() like format string. |
* \param arg arguments |
*/ |
#define DRM_MEM_ERROR(area, fmt, arg...) \ |
printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \ |
drm_mem_stats[area].name , ##arg) |
#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) |
/** |
* Debug output. |
* |
* \param fmt printf() like format string. |
* \param arg arguments |
*/ |
#if DRM_DEBUG_CODE |
#define DRM_DEBUG(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_DRIVER(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_KMS(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_LOG(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_CORE, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#define DRM_LOG_KMS(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_KMS, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#define DRM_LOG_MODE(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_MODE, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#define DRM_LOG_DRIVER(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#else |
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) |
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0) |
#define DRM_DEBUG(fmt, arg...) do { } while (0) |
#define DRM_LOG(fmt, arg...) do { } while (0) |
#define DRM_LOG_KMS(fmt, args...) do { } while (0) |
#define DRM_LOG_MODE(fmt, arg...) do { } while (0) |
#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0) |
#endif |
/*@}*/ |
/***********************************************************************/ |
/** \name Internal types and structures */ |
/*@{*/ |
#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) |
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) |
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) |
#define DRM_IF_VERSION(maj, min) (maj << 16 | min) |
/** |
* Test that the hardware lock is held by the caller, returning otherwise. |
* |
* \param dev DRM device. |
* \param filp file pointer of the caller. |
*/ |
#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ |
do { \ |
if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ |
_file_priv->master->lock.file_priv != _file_priv) { \ |
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
__func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ |
_file_priv->master->lock.file_priv, _file_priv); \ |
return -EINVAL; \ |
} \ |
} while (0) |
/** |
* Ioctl function type. |
* |
* \param inode device inode. |
* \param file_priv DRM file private pointer. |
* \param cmd command. |
* \param arg argument. |
*/ |
typedef int drm_ioctl_t(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, |
unsigned long arg); |
#define DRM_IOCTL_NR(n) _IOC_NR(n) |
#define DRM_MAJOR 226 |
#define DRM_AUTH 0x1 |
#define DRM_MASTER 0x2 |
#define DRM_ROOT_ONLY 0x4 |
#define DRM_CONTROL_ALLOW 0x8 |
#define DRM_UNLOCKED 0x10 |
struct drm_ioctl_desc { |
unsigned int cmd; |
int flags; |
drm_ioctl_t *func; |
}; |
/** |
* Creates a driver or general drm_ioctl_desc array entry for the given |
* ioctl, for use by drm_ioctl(). |
*/ |
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ |
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags} |
struct drm_magic_entry { |
struct list_head head; |
struct drm_hash_item hash_item; |
struct drm_file *priv; |
}; |
struct drm_vma_entry { |
struct list_head head; |
struct vm_area_struct *vma; |
pid_t pid; |
}; |
/** |
* DMA buffer. |
*/ |
struct drm_buf { |
int idx; /**< Index into master buflist */ |
int total; /**< Buffer size */ |
int order; /**< log-base-2(total) */ |
int used; /**< Amount of buffer in use (for DMA) */ |
unsigned long offset; /**< Byte offset (used internally) */ |
void *address; /**< Address of buffer */ |
unsigned long bus_address; /**< Bus address of buffer */ |
struct drm_buf *next; /**< Kernel-only: used for free list */ |
__volatile__ int waiting; /**< On kernel DMA queue */ |
__volatile__ int pending; /**< On hardware DMA queue */ |
wait_queue_head_t dma_wait; /**< Processes waiting */ |
struct drm_file *file_priv; /**< Private of holding file descr */ |
int context; /**< Kernel queue for this buffer */ |
int while_locked; /**< Dispatch this buffer while locked */ |
enum { |
DRM_LIST_NONE = 0, |
DRM_LIST_FREE = 1, |
DRM_LIST_WAIT = 2, |
DRM_LIST_PEND = 3, |
DRM_LIST_PRIO = 4, |
DRM_LIST_RECLAIM = 5 |
} list; /**< Which list we're on */ |
int dev_priv_size; /**< Size of buffer private storage */ |
void *dev_private; /**< Per-buffer private storage */ |
}; |
/** bufs is one longer than it has to be */ |
struct drm_waitlist { |
int count; /**< Number of possible buffers */ |
struct drm_buf **bufs; /**< List of pointers to buffers */ |
struct drm_buf **rp; /**< Read pointer */ |
struct drm_buf **wp; /**< Write pointer */ |
struct drm_buf **end; /**< End pointer */ |
spinlock_t read_lock; |
spinlock_t write_lock; |
}; |
struct drm_freelist { |
int initialized; /**< Freelist in use */ |
atomic_t count; /**< Number of free buffers */ |
struct drm_buf *next; /**< End pointer */ |
wait_queue_head_t waiting; /**< Processes waiting on free bufs */ |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
atomic_t wfh; /**< If waiting for high mark */ |
spinlock_t lock; |
}; |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
void *vaddr; |
size_t size; |
} drm_dma_handle_t; |
/** |
* Buffer entry. There is one of this for each buffer size order. |
*/ |
struct drm_buf_entry { |
int buf_size; /**< size */ |
int buf_count; /**< number of buffers */ |
struct drm_buf *buflist; /**< buffer list */ |
int seg_count; |
int page_order; |
struct drm_dma_handle **seglist; |
struct drm_freelist freelist; |
}; |
/* Event queued up for userspace to read */ |
struct drm_pending_event { |
struct drm_event *event; |
struct list_head link; |
struct drm_file *file_priv; |
void (*destroy)(struct drm_pending_event *event); |
}; |
/** File private data */ |
struct drm_file { |
int authenticated; |
pid_t pid; |
uid_t uid; |
drm_magic_t magic; |
unsigned long ioctl_count; |
struct list_head lhead; |
struct drm_minor *minor; |
unsigned long lock_count; |
/** Mapping of mm object handles to object pointers. */ |
struct idr object_idr; |
/** Lock for synchronization of access to object_idr. */ |
spinlock_t table_lock; |
struct file *filp; |
void *driver_priv; |
int is_master; /* this file private is a master for a minor */ |
struct drm_master *master; /* master this node is currently associated with |
N.B. not always minor->master */ |
struct list_head fbs; |
wait_queue_head_t event_wait; |
struct list_head event_list; |
int event_space; |
}; |
/** Wait queue */ |
struct drm_queue { |
atomic_t use_count; /**< Outstanding uses (+1) */ |
atomic_t finalization; /**< Finalization in progress */ |
atomic_t block_count; /**< Count of processes waiting */ |
atomic_t block_read; /**< Queue blocked for reads */ |
wait_queue_head_t read_queue; /**< Processes waiting on block_read */ |
atomic_t block_write; /**< Queue blocked for writes */ |
wait_queue_head_t write_queue; /**< Processes waiting on block_write */ |
atomic_t total_queued; /**< Total queued statistic */ |
atomic_t total_flushed; /**< Total flushes statistic */ |
atomic_t total_locks; /**< Total locks statistics */ |
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ |
struct drm_waitlist waitlist; /**< Pending buffers */ |
wait_queue_head_t flush_queue; /**< Processes waiting until flush */ |
}; |
/** |
* Lock data. |
*/ |
struct drm_lock_data { |
struct drm_hw_lock *hw_lock; /**< Hardware lock */ |
/** Private of lock holder's file (NULL=kernel) */ |
struct drm_file *file_priv; |
wait_queue_head_t lock_queue; /**< Queue of blocked processes */ |
unsigned long lock_time; /**< Time of last lock in jiffies */ |
spinlock_t spinlock; |
uint32_t kernel_waiters; |
uint32_t user_waiters; |
int idle_has_lock; |
}; |
/** |
* DMA data. |
*/ |
struct drm_device_dma { |
struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ |
int buf_count; /**< total number of buffers */ |
struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ |
int seg_count; |
int page_count; /**< number of pages */ |
unsigned long *pagelist; /**< page list */ |
unsigned long byte_count; |
enum { |
_DRM_DMA_USE_AGP = 0x01, |
_DRM_DMA_USE_SG = 0x02, |
_DRM_DMA_USE_FB = 0x04, |
_DRM_DMA_USE_PCI_RO = 0x08 |
} flags; |
}; |
/** |
* AGP memory entry. Stored as a doubly linked list. |
*/ |
struct drm_agp_mem { |
unsigned long handle; /**< handle */ |
DRM_AGP_MEM *memory; |
unsigned long bound; /**< address */ |
int pages; |
struct list_head head; |
}; |
/** |
* AGP data. |
* |
* \sa drm_agp_init() and drm_device::agp. |
*/ |
struct drm_agp_head { |
DRM_AGP_KERN agp_info; /**< AGP device information */ |
struct list_head memory; |
unsigned long mode; /**< AGP mode */ |
struct agp_bridge_data *bridge; |
int enabled; /**< whether the AGP bus as been enabled */ |
int acquired; /**< whether the AGP device has been acquired */ |
unsigned long base; |
int agp_mtrr; |
int cant_use_aperture; |
unsigned long page_mask; |
}; |
/** |
* Scatter-gather memory. |
*/ |
struct drm_sg_mem { |
unsigned long handle; |
void *virtual; |
int pages; |
struct page **pagelist; |
dma_addr_t *busaddr; |
}; |
struct drm_sigdata { |
int context; |
struct drm_hw_lock *lock; |
}; |
/** |
* Kernel side of a mapping |
*/ |
struct drm_local_map { |
resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
}; |
typedef struct drm_local_map drm_local_map_t; |
/** |
* Mappings list |
*/ |
struct drm_map_list { |
struct list_head head; /**< list head */ |
struct drm_hash_item hash; |
struct drm_local_map *map; /**< mapping */ |
uint64_t user_token; |
struct drm_master *master; |
struct drm_mm_node *file_offset_node; /**< fake offset */ |
}; |
/** |
* Context handle list |
*/ |
struct drm_ctx_list { |
struct list_head head; /**< list head */ |
drm_context_t handle; /**< context handle */ |
struct drm_file *tag; /**< associated fd private data */ |
}; |
/* location of GART table */ |
#define DRM_ATI_GART_MAIN 1 |
#define DRM_ATI_GART_FB 2 |
#define DRM_ATI_GART_PCI 1 |
#define DRM_ATI_GART_PCIE 2 |
#define DRM_ATI_GART_IGP 3 |
struct drm_ati_pcigart_info { |
int gart_table_location; |
int gart_reg_if; |
void *addr; |
dma_addr_t bus_addr; |
dma_addr_t table_mask; |
struct drm_dma_handle *table_handle; |
struct drm_local_map mapping; |
int table_size; |
}; |
/** |
* GEM specific mm private for tracking GEM objects |
*/ |
struct drm_gem_mm { |
struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ |
struct drm_open_hash offset_hash; /**< User token hash table for maps */ |
}; |
/** |
* This structure defines the drm_mm memory object, which will be used by the |
* DRM for its buffer objects. |
*/ |
struct drm_gem_object { |
/** Reference count of this object */ |
struct kref refcount; |
/** Handle count of this object. Each handle also holds a reference */ |
struct kref handlecount; |
/** Related drm device */ |
struct drm_device *dev; |
/** File representing the shmem storage */ |
struct file *filp; |
/* Mapping info for this object */ |
struct drm_map_list map_list; |
/** |
* Size of the object, in bytes. Immutable over the object's |
* lifetime. |
*/ |
size_t size; |
/** |
* Global name for this object, starts at 1. 0 means unnamed. |
* Access is covered by the object_name_lock in the related drm_device |
*/ |
int name; |
/** |
* Memory domains. These monitor which caches contain read/write data |
* related to the object. When transitioning from one set of domains |
* to another, the driver is called to ensure that caches are suitably |
* flushed and invalidated |
*/ |
uint32_t read_domains; |
uint32_t write_domain; |
/** |
* While validating an exec operation, the |
* new read/write domain values are computed here. |
* They will be transferred to the above values |
* at the point that any cache flushing occurs |
*/ |
uint32_t pending_read_domains; |
uint32_t pending_write_domain; |
void *driver_private; |
}; |
#include "drm_crtc.h" |
/* per-master structure */ |
struct drm_master { |
struct kref refcount; /* refcount for this master */ |
struct list_head head; /**< each minor contains a list of masters */ |
struct drm_minor *minor; /**< link back to minor we are a master for */ |
char *unique; /**< Unique identifier: e.g., busid */ |
int unique_len; /**< Length of unique field */ |
int unique_size; /**< amount allocated */ |
int blocked; /**< Blocked due to VC switch? */ |
/** \name Authentication */ |
/*@{ */ |
struct drm_open_hash magiclist; |
struct list_head magicfree; |
/*@} */ |
struct drm_lock_data lock; /**< Information on hardware lock */ |
void *driver_priv; /**< Private structure for driver to use */ |
}; |
/** |
* DRM driver structure. This structure represent the common code for |
* a family of cards. There will one drm_device for each card present |
* in this family |
*/ |
struct drm_driver { |
int (*load) (struct drm_device *, unsigned long flags); |
int (*firstopen) (struct drm_device *); |
int (*open) (struct drm_device *, struct drm_file *); |
void (*preclose) (struct drm_device *, struct drm_file *file_priv); |
void (*postclose) (struct drm_device *, struct drm_file *); |
void (*lastclose) (struct drm_device *); |
int (*unload) (struct drm_device *); |
int (*suspend) (struct drm_device *, pm_message_t state); |
int (*resume) (struct drm_device *); |
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); |
void (*dma_ready) (struct drm_device *); |
int (*dma_quiescent) (struct drm_device *); |
int (*context_ctor) (struct drm_device *dev, int context); |
int (*context_dtor) (struct drm_device *dev, int context); |
int (*kernel_context_switch) (struct drm_device *dev, int old, |
int new); |
void (*kernel_context_switch_unlock) (struct drm_device *dev); |
/** |
* get_vblank_counter - get raw hardware vblank counter |
* @dev: DRM device |
* @crtc: counter to fetch |
* |
* Driver callback for fetching a raw hardware vblank counter |
* for @crtc. If a device doesn't have a hardware counter, the |
* driver can simply return the value of drm_vblank_count and |
* make the enable_vblank() and disable_vblank() hooks into no-ops, |
* leaving interrupts enabled at all times. |
* |
* Wraparound handling and loss of events due to modesetting is dealt |
* with in the DRM core code. |
* |
* RETURNS |
* Raw vblank counter value. |
*/ |
u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); |
/** |
* enable_vblank - enable vblank interrupt events |
* @dev: DRM device |
* @crtc: which irq to enable |
* |
* Enable vblank interrupts for @crtc. If the device doesn't have |
* a hardware vblank counter, this routine should be a no-op, since |
* interrupts will have to stay on to keep the count accurate. |
* |
* RETURNS |
* Zero on success, appropriate errno if the given @crtc's vblank |
* interrupt cannot be enabled. |
*/ |
int (*enable_vblank) (struct drm_device *dev, int crtc); |
/** |
* disable_vblank - disable vblank interrupt events |
* @dev: DRM device |
* @crtc: which irq to enable |
* |
* Disable vblank interrupts for @crtc. If the device doesn't have |
* a hardware vblank counter, this routine should be a no-op, since |
* interrupts will have to stay on to keep the count accurate. |
*/ |
void (*disable_vblank) (struct drm_device *dev, int crtc); |
/** |
* Called by \c drm_device_is_agp. Typically used to determine if a |
* card is really attached to AGP or not. |
* |
* \param dev DRM device handle |
* |
* \returns |
* One of three values is returned depending on whether or not the |
* card is absolutely \b not AGP (return of 0), absolutely \b is AGP |
* (return of 1), or may or may not be AGP (return of 2). |
*/ |
int (*device_is_agp) (struct drm_device *dev); |
/* these have to be filled in */ |
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); |
void (*irq_preinstall) (struct drm_device *dev); |
int (*irq_postinstall) (struct drm_device *dev); |
void (*irq_uninstall) (struct drm_device *dev); |
void (*reclaim_buffers) (struct drm_device *dev, |
struct drm_file * file_priv); |
void (*reclaim_buffers_locked) (struct drm_device *dev, |
struct drm_file *file_priv); |
void (*reclaim_buffers_idlelocked) (struct drm_device *dev, |
struct drm_file *file_priv); |
resource_size_t (*get_map_ofs) (struct drm_local_map * map); |
resource_size_t (*get_reg_ofs) (struct drm_device *dev); |
void (*set_version) (struct drm_device *dev, |
struct drm_set_version *sv); |
/* Master routines */ |
int (*master_create)(struct drm_device *dev, struct drm_master *master); |
void (*master_destroy)(struct drm_device *dev, struct drm_master *master); |
/** |
* master_set is called whenever the minor master is set. |
* master_drop is called whenever the minor master is dropped. |
*/ |
int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, |
bool from_open); |
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, |
bool from_release); |
int (*proc_init)(struct drm_minor *minor); |
void (*proc_cleanup)(struct drm_minor *minor); |
int (*debugfs_init)(struct drm_minor *minor); |
void (*debugfs_cleanup)(struct drm_minor *minor); |
/** |
* Driver-specific constructor for drm_gem_objects, to set up |
* obj->driver_private. |
* |
* Returns 0 on success. |
*/ |
int (*gem_init_object) (struct drm_gem_object *obj); |
void (*gem_free_object) (struct drm_gem_object *obj); |
/* vga arb irq handler */ |
void (*vgaarb_irq)(struct drm_device *dev, bool state); |
/* Driver private ops for this object */ |
struct vm_operations_struct *gem_vm_ops; |
int major; |
int minor; |
int patchlevel; |
char *name; |
char *desc; |
char *date; |
u32 driver_features; |
int dev_priv_size; |
struct drm_ioctl_desc *ioctls; |
int num_ioctls; |
struct file_operations fops; |
struct pci_driver pci_driver; |
/* List of devices hanging off this driver */ |
struct list_head device_list; |
}; |
#define DRM_MINOR_UNASSIGNED 0 |
#define DRM_MINOR_LEGACY 1 |
#define DRM_MINOR_CONTROL 2 |
#define DRM_MINOR_RENDER 3 |
/** |
* debugfs node list. This structure represents a debugfs file to |
* be created by the drm core |
*/ |
struct drm_debugfs_list { |
const char *name; /** file name */ |
int (*show)(struct seq_file*, void*); /** show callback */ |
u32 driver_features; /**< Required driver features for this entry */ |
}; |
/** |
* debugfs node structure. This structure represents a debugfs file. |
*/ |
struct drm_debugfs_node { |
struct list_head list; |
struct drm_minor *minor; |
struct drm_debugfs_list *debugfs_ent; |
struct dentry *dent; |
}; |
/** |
* Info file list entry. This structure represents a debugfs or proc file to |
* be created by the drm core |
*/ |
struct drm_info_list { |
const char *name; /** file name */ |
int (*show)(struct seq_file*, void*); /** show callback */ |
u32 driver_features; /**< Required driver features for this entry */ |
void *data; |
}; |
/** |
* debugfs node structure. This structure represents a debugfs file. |
*/ |
struct drm_info_node { |
struct list_head list; |
struct drm_minor *minor; |
struct drm_info_list *info_ent; |
struct dentry *dent; |
}; |
/** |
* DRM minor structure. This structure represents a drm minor number. |
*/ |
struct drm_minor { |
int index; /**< Minor device number */ |
int type; /**< Control or render */ |
dev_t device; /**< Device number for mknod */ |
struct device kdev; /**< Linux device */ |
struct drm_device *dev; |
struct proc_dir_entry *proc_root; /**< proc directory entry */ |
struct drm_info_node proc_nodes; |
struct dentry *debugfs_root; |
struct drm_info_node debugfs_nodes; |
struct drm_master *master; /* currently active master for this node */ |
struct list_head master_list; |
struct drm_mode_group mode_group; |
}; |
#endif |
/**
 * DRM device structure. This structure represents a complete card that
 * may contain multiple heads.
 */
struct drm_device {
	struct list_head driver_item;	/**< list of devices per driver */
	char *devname;			/**< For /proc/interrupts */
	int if_version;			/**< Highest interface version set */
	/** \name Locks */
	/*@{ */
	spinlock_t count_lock;		/**< For inuse, drm_device::open_count, drm_device::buf_use */
//	struct mutex struct_mutex;	/**< For others */
	/*@} */
	/** \name Usage Counters */
	/*@{ */
	int open_count;			/**< Outstanding files open */
	atomic_t ioctl_count;		/**< Outstanding IOCTLs pending */
	atomic_t vma_count;		/**< Outstanding vma areas open */
	int buf_use;			/**< Buffers in use -- cannot alloc */
	atomic_t buf_alloc;		/**< Buffer allocation in progress */
	/*@} */
	/** \name Performance counters */
	/*@{ */
	unsigned long counters;		/* number of active entries in counts[] — TODO confirm */
//	enum drm_stat_type types[15];
	atomic_t counts[15];		/* per-counter statistics */
	/*@} */
	struct list_head filelist;	/* list of open files — NOTE(review): confirm element type */
	/** \name Memory management */
	/*@{ */
	struct list_head maplist;	/**< Linked list of regions */
	int map_count;			/**< Number of mappable regions */
//	struct drm_open_hash map_hash;	/**< User token hash table for maps */
	/** \name Context handle management */
	/*@{ */
	struct list_head ctxlist;	/**< Linked list of context handles */
	int ctx_count;			/**< Number of context handles */
//	struct mutex ctxlist_mutex;	/**< For ctxlist */
//	struct idr ctx_idr;
	struct list_head vmalist;	/**< List of vmas (for debugging) */
	/*@} */
	/** \name DMA queues (contexts) */
	/*@{ */
	int queue_count;		/**< Number of active DMA queues */
	int queue_reserved;		/**< Number of reserved DMA queues */
	int queue_slots;		/**< Actual length of queuelist */
//	struct drm_queue **queuelist;	/**< Vector of pointers to DMA queues */
//	struct drm_device_dma *dma;	/**< Optional pointer for DMA support */
	/*@} */
	/** \name Context support */
	/*@{ */
	int irq_enabled;		/**< True if irq handler is enabled */
	__volatile__ long context_flag;	/**< Context swapping flag */
	__volatile__ long interrupt_flag; /**< Interruption handler flag */
	__volatile__ long dma_flag;	/**< DMA dispatch flag */
//	struct timer_list timer;	/**< Timer for delaying ctx switch */
//	wait_queue_head_t context_wait;	/**< Processes waiting on ctx switch */
	int last_checked;		/**< Last context checked for DMA */
	int last_context;		/**< Last current context */
	unsigned long last_switch;	/**< jiffies at last context switch */
	/*@} */
//	struct work_struct work;
	/** \name VBLANK IRQ support */
	/*@{ */
	/*
	 * At load time, disabling the vblank interrupt won't be allowed since
	 * old clients may not call the modeset ioctl and therefore misbehave.
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int vblank_disable_allowed;
//	wait_queue_head_t *vbl_queue;	/**< VBLANK wait queue */
	atomic_t *_vblank_count;	/**< number of VBLANK interrupts (driver must alloc the right number of counters) */
	spinlock_t vbl_lock;		/* protects the vblank state below */
	atomic_t *vblank_refcount;	/* number of users of vblank interrupts per crtc */
	u32 *last_vblank;		/* protected by dev->vbl_lock, used */
					/* for wraparound handling */
	int *vblank_enabled;		/* so we don't call enable more than
					   once per disable */
	int *vblank_inmodeset;		/* Display driver is setting mode */
	u32 *last_vblank_wait;		/* Last vblank seqno waited per CRTC */
//	struct timer_list vblank_disable_timer;
	u32 max_vblank_count;		/**< size of vblank counter register */
	/*@} */
//	cycles_t ctx_start;
//	cycles_t lck_start;
//	struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
//	wait_queue_head_t buf_readers;	/**< Processes waiting to read */
//	wait_queue_head_t buf_writers;	/**< Processes waiting to ctx switch */
//	struct drm_agp_head *agp;	/**< AGP data */
	struct pci_dev *pdev;		/**< PCI device structure */
	int pci_vendor;			/**< PCI vendor id */
	int pci_device;			/**< PCI device id */
//	struct drm_sg_mem *sg;	/**< Scatter gather memory */
	int num_crtcs;			/**< Number of CRTCs on this device */
	void *dev_private;		/**< device private data */
	void *mm_private;		/* memory-manager private data */
	struct address_space *dev_mapping;
//	struct drm_sigdata sigdata;	/**< For block_all_signals */
//	sigset_t sigmask;
//	struct drm_driver *driver;
//	struct drm_local_map *agp_buffer_map;
//	unsigned int agp_buffer_token;
//	struct drm_minor *control;	/**< Control node for card */
//	struct drm_minor *primary;	/**< render type primary screen head */
	/** \name Drawable information */
	/*@{ */
	spinlock_t drw_lock;		/* protects drawable state */
//	struct idr drw_idr;
	/*@} */
	struct drm_mode_config mode_config;	/**< Current mode config */
	/** \name GEM information */
	/*@{ */
	spinlock_t object_name_lock;	/* protects GEM object naming */
//	struct idr object_name_idr;
	/* GEM allocation / pin / GTT usage statistics */
	atomic_t object_count;
	atomic_t object_memory;
	atomic_t pin_count;
	atomic_t pin_memory;
	atomic_t gtt_count;
	atomic_t gtt_memory;
	uint32_t gtt_total;
	uint32_t invalidate_domains;	/* domains pending invalidation */
	uint32_t flush_domains;		/* domains pending flush */
	/*@} */
};
static __inline__ int drm_device_is_agp(struct drm_device *dev) |
{ |
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); |
} |
static __inline__ int drm_device_is_pcie(struct drm_device *dev) |
{ |
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); |
} |
#if 0
/* NOTE(review): everything below this #if 0 is compiled out in this port. */
static inline int drm_dev_to_irq(struct drm_device *dev)
{
	/* The IRQ line assigned to the underlying PCI device. */
	return dev->pdev->irq;
}
/* Test whether the driver advertises a given core feature bit; 1 or 0. */
static __inline__ int drm_core_check_feature(struct drm_device *dev,
					     int feature)
{
	return ((dev->driver->driver_features & feature) ? 1 : 0);
}
#ifdef __alpha__
#define drm_get_pci_domain(dev) dev->hose->index
#else
#define drm_get_pci_domain(dev) 0
#endif
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
{
	return drm_core_check_feature(dev, DRIVER_USE_AGP);
}
#else
#define drm_core_has_AGP(dev) (0)
#endif
#if __OS_HAS_MTRR
static inline int drm_core_has_MTRR(struct drm_device *dev)
{
	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
}
#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
/* Mark [offset, offset+size) write-combined via the MTRR facility. */
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
			       unsigned int flags)
{
	return mtrr_add(offset, size, flags, 1);
}
static inline int drm_mtrr_del(int handle, unsigned long offset,
			       unsigned long size, unsigned int flags)
{
	return mtrr_del(handle, offset, size);
}
#else
#define drm_core_has_MTRR(dev) (0)
#define DRM_MTRR_WC 0
/* MTRR stubs: report success when MTRR support is absent. */
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
			       unsigned int flags)
{
	return 0;
}
static inline int drm_mtrr_del(int handle, unsigned long offset,
			       unsigned long size, unsigned int flags)
{
	return 0;
}
#endif
/******************************************************************/ |
/** \name Internal function definitions */ |
/*@{*/ |
/* Driver support (drm_drv.h) */ |
extern int drm_init(struct drm_driver *driver); |
extern void drm_exit(struct drm_driver *driver); |
extern long drm_ioctl(struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern long drm_compat_ioctl(struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern int drm_lastclose(struct drm_device *dev); |
/* Device support (drm_fops.h) */ |
extern int drm_open(struct inode *inode, struct file *filp); |
extern int drm_stub_open(struct inode *inode, struct file *filp); |
extern int drm_fasync(int fd, struct file *filp, int on); |
extern ssize_t drm_read(struct file *filp, char __user *buffer, |
size_t count, loff_t *offset); |
extern int drm_release(struct inode *inode, struct file *filp); |
/* Mapping support (drm_vm.h) */ |
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
extern void drm_vm_open_locked(struct vm_area_struct *vma); |
extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); |
extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); |
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
/* Memory management support (drm_memory.h) */ |
#include "drm_memory.h" |
extern void drm_mem_init(void); |
extern int drm_mem_info(char *buf, char **start, off_t offset, |
int request, int *eof, void *data); |
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); |
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); |
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); |
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); |
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, |
struct page **pages, |
unsigned long num_pages, |
uint32_t gtt_offset, |
uint32_t type); |
extern int drm_unbind_agp(DRM_AGP_MEM * handle); |
/* Misc. IOCTL support (drm_ioctl.h) */ |
extern int drm_irq_by_busid(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getmap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getclient(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getstats(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setversion(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_noop(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Context IOCTL support (drm_context.h) */ |
extern int drm_resctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_modctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_switchctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_newctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_ctxbitmap_init(struct drm_device *dev); |
extern void drm_ctxbitmap_cleanup(struct drm_device *dev); |
extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); |
extern int drm_setsareactx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getsareactx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Drawable IOCTL support (drm_drawable.h) */ |
extern int drm_adddraw(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmdraw(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_update_drawable_info(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, |
drm_drawable_t id); |
extern void drm_drawable_free_all(struct drm_device *dev); |
/* Authentication IOCTL support (drm_auth.h) */ |
extern int drm_getmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_authmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Cache management (drm_cache.c) */ |
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); |
/* Locking IOCTL support (drm_lock.h) */ |
extern int drm_lock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_unlock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); |
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); |
extern void drm_idlelock_take(struct drm_lock_data *lock_data); |
extern void drm_idlelock_release(struct drm_lock_data *lock_data); |
/* |
* These are exported to drivers so that they can implement fencing using |
 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/ |
extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); |
/* Buffer management support (drm_bufs.h) */ |
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addmap(struct drm_device *dev, resource_size_t offset, |
unsigned int size, enum drm_map_type type, |
enum drm_map_flags flags, struct drm_local_map **map_ptr); |
extern int drm_addmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_infobufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_markbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_freebufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_mapbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_order(unsigned long size); |
extern resource_size_t drm_get_resource_start(struct drm_device *dev, |
unsigned int resource); |
extern resource_size_t drm_get_resource_len(struct drm_device *dev, |
unsigned int resource); |
/* DMA support (drm_dma.h) */ |
extern int drm_dma_setup(struct drm_device *dev); |
extern void drm_dma_takedown(struct drm_device *dev); |
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); |
extern void drm_core_reclaim_buffers(struct drm_device *dev, |
struct drm_file *filp); |
/* IRQ support (drm_irq.h) */ |
extern int drm_control(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); |
extern int drm_irq_install(struct drm_device *dev); |
extern int drm_irq_uninstall(struct drm_device *dev); |
extern void drm_driver_irq_preinstall(struct drm_device *dev); |
extern void drm_driver_irq_postinstall(struct drm_device *dev); |
extern void drm_driver_irq_uninstall(struct drm_device *dev); |
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); |
extern int drm_wait_vblank(struct drm_device *dev, void *data, |
struct drm_file *filp); |
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); |
extern u32 drm_vblank_count(struct drm_device *dev, int crtc); |
extern void drm_handle_vblank(struct drm_device *dev, int crtc); |
extern int drm_vblank_get(struct drm_device *dev, int crtc); |
extern void drm_vblank_put(struct drm_device *dev, int crtc); |
extern void drm_vblank_off(struct drm_device *dev, int crtc); |
extern void drm_vblank_cleanup(struct drm_device *dev); |
/* Modesetting support */ |
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); |
extern int drm_modeset_ctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* AGP/GART support (drm_agpsupport.h) */ |
extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); |
extern int drm_agp_acquire(struct drm_device *dev); |
extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_release(struct drm_device *dev); |
extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); |
extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); |
extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); |
extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); |
extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); |
extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); |
extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); |
extern int drm_agp_free_memory(DRM_AGP_MEM * handle); |
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); |
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); |
extern void drm_agp_chipset_flush(struct drm_device *dev); |
/* Stub support (drm_stub.h) */ |
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
struct drm_master *drm_master_create(struct drm_minor *minor); |
extern struct drm_master *drm_master_get(struct drm_master *master); |
extern void drm_master_put(struct drm_master **master); |
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, |
struct drm_driver *driver); |
extern void drm_put_dev(struct drm_device *dev); |
extern int drm_put_minor(struct drm_minor **minor); |
extern unsigned int drm_debug; |
extern struct class *drm_class; |
extern struct proc_dir_entry *drm_proc_root; |
extern struct dentry *drm_debugfs_root; |
extern struct idr drm_minors_idr; |
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); |
/* Proc support (drm_proc.h) */ |
extern int drm_proc_init(struct drm_minor *minor, int minor_id, |
struct proc_dir_entry *root); |
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); |
/* Debugfs support */ |
#if defined(CONFIG_DEBUG_FS) |
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
struct dentry *root); |
extern int drm_debugfs_create_files(struct drm_info_list *files, int count, |
struct dentry *root, struct drm_minor *minor); |
extern int drm_debugfs_remove_files(struct drm_info_list *files, int count, |
struct drm_minor *minor); |
extern int drm_debugfs_cleanup(struct drm_minor *minor); |
#endif |
/* Info file support */ |
extern int drm_name_info(struct seq_file *m, void *data); |
extern int drm_vm_info(struct seq_file *m, void *data); |
extern int drm_queues_info(struct seq_file *m, void *data); |
extern int drm_bufs_info(struct seq_file *m, void *data); |
extern int drm_vblank_info(struct seq_file *m, void *data); |
extern int drm_clients_info(struct seq_file *m, void* data); |
extern int drm_gem_name_info(struct seq_file *m, void *data); |
extern int drm_gem_object_info(struct seq_file *m, void* data); |
#if DRM_DEBUG_CODE |
extern int drm_vma_info(struct seq_file *m, void *data); |
#endif |
/* Scatter Gather Support (drm_scatter.h) */ |
extern void drm_sg_cleanup(struct drm_sg_mem * entry); |
extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); |
extern int drm_sg_free(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* ATI PCIGART support (ati_pcigart.h) */ |
extern int drm_ati_pcigart_init(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, |
size_t align, dma_addr_t maxaddr); |
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
/* sysfs support (drm_sysfs.c) */ |
struct drm_sysfs_class; |
extern struct class *drm_sysfs_create(struct module *owner, char *name); |
extern void drm_sysfs_destroy(void); |
extern int drm_sysfs_device_add(struct drm_minor *minor); |
extern void drm_sysfs_hotplug_event(struct drm_device *dev); |
extern void drm_sysfs_device_remove(struct drm_minor *minor); |
extern char *drm_get_connector_status_name(enum drm_connector_status status); |
/* sysfs connector registration is stubbed out in this port: report success. */
static inline int drm_sysfs_connector_add(struct drm_connector *connector)
{ return 0; };
/* No-op: drm_sysfs_connector_add() registered nothing to remove. */
static inline void drm_sysfs_connector_remove(struct drm_connector *connector)
{ };
/* Graphics Execution Manager library functions (drm_gem.c) */ |
int drm_gem_init(struct drm_device *dev); |
void drm_gem_destroy(struct drm_device *dev); |
void drm_gem_object_free(struct kref *kref); |
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, |
size_t size); |
void drm_gem_object_handle_free(struct kref *kref); |
void drm_gem_vm_open(struct vm_area_struct *vma); |
void drm_gem_vm_close(struct vm_area_struct *vma); |
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
/* Take an additional reference on a GEM object's refcount. */
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}
/* Drop a reference on a GEM object; NULL is tolerated as a no-op. */
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;
	/* drm_gem_object_free() runs when the count reaches zero. */
	kref_put(&obj->refcount, drm_gem_object_free);
}
int drm_gem_handle_create(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
u32 *handlep); |
/* A userspace handle pins both the object and its handle count. */
static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
	drm_gem_object_reference(obj);
	kref_get(&obj->handlecount);
}
/* Release a userspace handle's references taken by handle_reference(). */
static inline void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;
	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */
	kref_put(&obj->handlecount, drm_gem_object_handle_free);
	drm_gem_object_unreference(obj);
}
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, |
struct drm_file *filp, |
u32 handle); |
int drm_gem_close_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_flink_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_open_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); |
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); |
extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); |
/* Look up a local map by its userspace token; NULL when none matches. */
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
						unsigned int token)
{
	struct drm_map_list *_entry;
	/* Linear scan over the device's map list. */
	list_for_each_entry(_entry, &dev->maplist, head)
	    if (_entry->user_token == token)
		return _entry->map;
	return NULL;
}
/* Intentional no-op: maps found via drm_core_findmap() need no release. */
static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}
/*
 * Allocate zeroed storage for an nmemb-element array of size-byte objects.
 * Requests of at most one page are served from the slab via kcalloc();
 * larger ones fall back to vmalloc space.  Returns NULL on multiplication
 * overflow or allocation failure.  Pair with drm_free_large().
 */
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
	/*
	 * Check for nmemb * size overflow BEFORE computing the product;
	 * the original code evaluated size * nmemb first, so a wrapped
	 * value could be compared against PAGE_SIZE.
	 */
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;
	if (size * nmemb <= PAGE_SIZE)
	    return kcalloc(nmemb, size, GFP_KERNEL);
	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}
/* Release memory obtained from drm_calloc_large(). */
static __inline void drm_free_large(void *ptr)
{
	/* Route to the allocator that produced the pointer. */
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
/*@}*/ |
#endif |
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/drm/drm_crtc.h |
---|
0,0 → 1,804 |
/* |
* Copyright © 2006 Keith Packard |
* Copyright © 2007-2008 Dave Airlie |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef __DRM_CRTC_H__ |
#define __DRM_CRTC_H__ |
#include <linux/i2c.h> |
//#include <linux/spinlock.h> |
#include <linux/types.h> |
#include <linux/idr.h> |
#include <linux/fb.h> |
struct drm_device; |
struct drm_mode_set; |
struct drm_framebuffer; |
#define DRM_MODE_OBJECT_CRTC 0xcccccccc |
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 |
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 |
#define DRM_MODE_OBJECT_MODE 0xdededede |
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 |
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb |
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb |
/* Base of every mode-setting object: a numeric id plus one of the
 * DRM_MODE_OBJECT_* type magics defined above. */
struct drm_mode_object {
	uint32_t id;	/* object handle */
	uint32_t type;	/* DRM_MODE_OBJECT_* */
};
/* |
* Note on terminology: here, for brevity and convenience, we refer to connector |
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, |
* DVI, etc. And 'screen' refers to the whole of the visible display, which |
* may span multiple monitors (and therefore multiple CRTC and connector |
* structures). |
*/ |
/* Result of validating a display mode against hardware/monitor limits. */
enum drm_mode_status {
    MODE_OK	= 0,	/* Mode OK */
    MODE_HSYNC,		/* hsync out of range */
    MODE_VSYNC,		/* vsync out of range */
    MODE_H_ILLEGAL,	/* mode has illegal horizontal timings */
    MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
    MODE_BAD_WIDTH,	/* requires an unsupported linepitch */
    MODE_NOMODE,	/* no mode with a matching name */
    MODE_NO_INTERLACE,	/* interlaced mode not supported */
    MODE_NO_DBLESCAN,	/* doublescan mode not supported */
    MODE_NO_VSCAN,	/* multiscan mode not supported */
    MODE_MEM,		/* insufficient video memory */
    MODE_VIRTUAL_X,	/* mode width too large for specified virtual size */
    MODE_VIRTUAL_Y,	/* mode height too large for specified virtual size */
    MODE_MEM_VIRT,	/* insufficient video memory given virtual size */
    MODE_NOCLOCK,	/* no fixed clock available */
    MODE_CLOCK_HIGH,	/* clock required is too high */
    MODE_CLOCK_LOW,	/* clock required is too low */
    MODE_CLOCK_RANGE,	/* clock/mode isn't in a ClockRange */
    MODE_BAD_HVALUE,	/* horizontal timing was out of range */
    MODE_BAD_VVALUE,	/* vertical timing was out of range */
    MODE_BAD_VSCAN,	/* VScan value out of range */
    MODE_HSYNC_NARROW,	/* horizontal sync too narrow */
    MODE_HSYNC_WIDE,	/* horizontal sync too wide */
    MODE_HBLANK_NARROW,	/* horizontal blanking too narrow */
    MODE_HBLANK_WIDE,	/* horizontal blanking too wide */
    MODE_VSYNC_NARROW,	/* vertical sync too narrow */
    MODE_VSYNC_WIDE,	/* vertical sync too wide */
    MODE_VBLANK_NARROW,	/* vertical blanking too narrow */
    MODE_VBLANK_WIDE,	/* vertical blanking too wide */
    MODE_PANEL,		/* exceeds panel dimensions */
    MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
    MODE_ONE_WIDTH,	/* only one width is supported */
    MODE_ONE_HEIGHT,	/* only one height is supported */
    MODE_ONE_SIZE,	/* only one resolution is supported */
    MODE_NO_REDUCED,	/* monitor doesn't accept reduced blanking */
    MODE_UNVERIFIED = -3, /* mode needs to be re-verified */
    MODE_BAD = -2,	/* unspecified reason */
    MODE_ERROR	= -1	/* error condition */
};
/* A mode whose clock and CRTC timing type bits are both set. */
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C |	\
				    DRM_MODE_TYPE_CRTC_C)
/*
 * Static initializer for struct drm_display_mode: sets the name, type,
 * clock and the full horizontal/vertical timing set; vrefresh starts at 0.
 */
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
	.name = nm, .status = 0, .type = (t), .clock = (c), \
	.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
	.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
	.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
	.vscan = (vs), .flags = (f), .vrefresh = 0
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ |
/*
 * A single display mode: the proposed timing values as exposed to
 * userspace, plus the crtc_-prefixed values actually given to hardware.
 */
struct drm_display_mode {
	/* Header */
	struct list_head head;
	struct drm_mode_object base;	/* mode object id/type */
	char name[DRM_DISPLAY_MODE_LEN];
	int connector_count;
	enum drm_mode_status status;	/* result of mode validation */
	int type;			/* DRM_MODE_TYPE_* bits */
	/* Proposed mode values */
	int clock;		/* in kHz */
	int hdisplay;
	int hsync_start;
	int hsync_end;
	int htotal;
	int hskew;
	int vdisplay;
	int vsync_start;
	int vsync_end;
	int vtotal;
	int vscan;
	unsigned int flags;
	/* Addressable image size (may be 0 for projectors, etc.) */
	int width_mm;
	int height_mm;
	/* Actual mode we give to hw */
	int clock_index;
	int synth_clock;
	int crtc_hdisplay;
	int crtc_hblank_start;
	int crtc_hblank_end;
	int crtc_hsync_start;
	int crtc_hsync_end;
	int crtc_htotal;
	int crtc_hskew;
	int crtc_vdisplay;
	int crtc_vblank_start;
	int crtc_vblank_end;
	int crtc_vsync_start;
	int crtc_vsync_end;
	int crtc_vtotal;
	int crtc_hadjusted;
	int crtc_vadjusted;
	/* Driver private mode info */
	int private_size;
	int *private;
	int private_flags;
	int vrefresh;		/* in Hz */
	int hsync;		/* in kHz */
};
/* Result of probing a connector for an attached display. */
enum drm_connector_status {
	connector_status_connected = 1,
	connector_status_disconnected = 2,
	connector_status_unknown = 3,	/* hardware cannot determine state */
};
/* Physical subpixel arrangement of the display. */
enum subpixel_order {
	SubPixelUnknown = 0,
	SubPixelHorizontalRGB,
	SubPixelHorizontalBGR,
	SubPixelVerticalRGB,
	SubPixelVerticalBGR,
	SubPixelNone,
};
/*
 * Describes a given display (e.g. CRT or flat panel) and its limitations.
 */
struct drm_display_info {
	char name[DRM_DISPLAY_INFO_LEN];
	/* Input info */
	bool serration_vsync;
	bool sync_on_green;
	bool composite_sync;
	bool separate_syncs;
	bool blank_to_black;
	unsigned char video_level;
	bool digital;		/* digital (vs. analog) input */
	/* Physical size */
	unsigned int width_mm;
	unsigned int height_mm;
	/* Display parameters */
	unsigned char gamma; /* FIXME: storage format */
	bool gtf_supported;
	bool standard_color;
	enum {
		monochrome = 0,
		rgb,
		other,
		unknown,
	} display_type;
	bool active_off_supported;
	bool suspend_supported;
	bool standby_supported;
	/* Color info FIXME: storage format */
	unsigned short redx, redy;
	unsigned short greenx, greeny;
	unsigned short bluex, bluey;
	unsigned short whitex, whitey;
	/* Clock limits FIXME: storage format */
	unsigned int min_vfreq, max_vfreq;
	unsigned int min_hfreq, max_hfreq;
	unsigned int pixel_clock;
	/* White point indices FIXME: storage format */
	unsigned int wpx1, wpy1;
	unsigned int wpgamma1;
	unsigned int wpx2, wpy2;
	unsigned int wpgamma2;
	enum subpixel_order subpixel_order;
	char *raw_edid; /* if any */
};
/* Driver callbacks for a framebuffer object. */
struct drm_framebuffer_funcs {
	/* Final teardown of the framebuffer object. */
	void (*destroy)(struct drm_framebuffer *framebuffer);
	/* Create a userspace-visible handle for the framebuffer. */
	int (*create_handle)(struct drm_framebuffer *fb,
			     struct drm_file *file_priv,
			     unsigned int *handle);
	/**
	 * Optional callback for the dirty fb ioctl.
	 *
	 * Userspace can notify the driver via this callback
	 * that an area of the framebuffer has changed and should
	 * be flushed to the display hardware.
	 *
	 * See documentation in drm_mode.h for the struct
	 * drm_mode_fb_dirty_cmd for more information as all
	 * the semantics and arguments have a one to one mapping
	 * on this function.
	 */
	int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
		     unsigned color, struct drm_clip_rect *clips,
		     unsigned num_clips);
};
/* A scanout buffer: geometry, pixel format and bookkeeping links. */
struct drm_framebuffer {
	struct drm_device *dev;		/* owning device */
	struct list_head head;		/* link in the device fb list */
	struct drm_mode_object base;	/* mode object id/type */
	const struct drm_framebuffer_funcs *funcs;
	unsigned int pitch;		/* bytes per scanline */
	unsigned int width;
	unsigned int height;
	/* depth can be 15 or 16 */
	unsigned int depth;
	int bits_per_pixel;
	int flags;
	struct fb_info *fbdev;		/* associated fbdev emulation info */
	u32 pseudo_palette[17];
	struct list_head filp_head;	/* link in the owning file's fb list */
	/* if you are using the helper */
	void *helper_private;
};
/**
 * drm_property_blob - opaque binary payload attached to a property
 * (e.g. EDID data, see edid_blob_ptr in drm_connector)
 */
struct drm_property_blob {
    struct drm_mode_object base; /* base object for ID lookup */
    struct list_head head;       /* entry in mode_config property_blob_list */
    unsigned int length;         /* number of bytes at @data */
    void *data;                  /* the blob contents */
};
/**
 * drm_property_enum - one named value of an enumerated property
 */
struct drm_property_enum {
    uint64_t value;              /* raw value this entry stands for */
    struct list_head head;       /* entry in the owning property's enum_blob_list */
    char name[DRM_PROP_NAME_LEN]; /* human-readable name */
};
/**
 * drm_property - a mode setting property object
 *
 * Created via drm_property_create(); attached to connectors with
 * drm_connector_attach_property().
 */
struct drm_property {
    struct list_head head;        /* entry in mode_config property_list */
    struct drm_mode_object base;  /* base object for ID lookup */
    uint32_t flags;               /* property type/behavior flags */
    char name[DRM_PROP_NAME_LEN]; /* human-readable name */
    uint32_t num_values;          /* number of entries in @values */
    uint64_t *values;             /* value table (range limits or enum values) */
    struct list_head enum_blob_list; /* drm_property_enum / blob entries */
};
struct drm_crtc; |
struct drm_connector; |
struct drm_encoder; |
struct drm_pending_vblank_event; |
/** |
* drm_crtc_funcs - control CRTCs for a given device |
* @dpms: control display power levels |
* @save: save CRTC state |
 * @restore: restore CRTC state
* @lock: lock the CRTC |
* @unlock: unlock the CRTC |
* @shadow_allocate: allocate shadow pixmap |
* @shadow_create: create shadow pixmap for rotation support |
* @shadow_destroy: free shadow pixmap |
* @mode_fixup: fixup proposed mode |
* @mode_set: set the desired mode on the CRTC |
* @gamma_set: specify color ramp for CRTC |
* @destroy: deinit and free object. |
* |
* The drm_crtc_funcs structure is the central CRTC management structure |
* in the DRM. Each CRTC controls one or more connectors (note that the name |
* CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. |
* connectors, not just CRTs). |
* |
* Each driver is responsible for filling out this structure at startup time, |
* in addition to providing other modesetting features, like i2c and DDC |
* bus accessors. |
*/ |
struct drm_crtc_funcs {
    /* Save CRTC state */
    void (*save)(struct drm_crtc *crtc); /* suspend? */
    /* Restore CRTC state */
    void (*restore)(struct drm_crtc *crtc); /* resume? */
    /* cursor controls */
    int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
                      uint32_t handle, uint32_t width, uint32_t height);
    int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
    /* Set gamma on the CRTC */
    void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
                      uint32_t size);
    /* Object destroy routine */
    void (*destroy)(struct drm_crtc *crtc);
    /* Apply a full mode configuration (crtc/fb/mode/connectors) */
    int (*set_config)(struct drm_mode_set *set);
    /*
     * Flip to the given framebuffer. This implements the page
     * flip ioctl described in drm_mode.h, specifically, the
     * implementation must return immediately and block all
     * rendering to the current fb until the flip has completed.
     * If userspace set the event flag in the ioctl, the event
     * argument will point to an event to send back when the flip
     * completes, otherwise it will be NULL.
     */
    int (*page_flip)(struct drm_crtc *crtc,
                     struct drm_framebuffer *fb,
                     struct drm_pending_vblank_event *event);
};
/** |
* drm_crtc - central CRTC control structure |
* @enabled: is this CRTC enabled? |
* @x: x position on screen |
* @y: y position on screen |
* @desired_mode: new desired mode |
* @desired_x: desired x for desired_mode |
* @desired_y: desired y for desired_mode |
* @funcs: CRTC control functions |
* |
* Each CRTC may have one or more connectors associated with it. This structure |
* allows the CRTC to be controlled. |
*/ |
struct drm_crtc {
    struct drm_device *dev;      /* owning device */
    struct list_head head;       /* entry in mode_config crtc_list */
    struct drm_mode_object base; /* base object for ID lookup */
    /* framebuffer the connector is currently bound to */
    struct drm_framebuffer *fb;
    bool enabled;                /* is this CRTC on? */
    struct drm_display_mode mode; /* currently programmed mode */
    int x, y;                    /* scanout position within @fb */
    struct drm_display_mode *desired_mode; /* pending mode request */
    int desired_x, desired_y;    /* pending position for @desired_mode */
    const struct drm_crtc_funcs *funcs; /* control callbacks */
    /* CRTC gamma size for reporting to userspace */
    uint32_t gamma_size;
    uint16_t *gamma_store;       /* backing store for the gamma ramp */
    /* if you are using the helper */
    void *helper_private;
};
/** |
* drm_connector_funcs - control connectors on a given device |
 * @dpms: set power state (see drm_crtc_funcs above)
 * @save: save connector state
 * @restore: restore connector state
 * @detect: is this connector active?
 * @fill_modes: fill the connector's mode list, within the given size limits
 * @set_property: property for this connector may need update
 * @destroy: make object go away
 * @force: notify the driver the connector is forced on
* |
* Each CRTC may have one or more connectors attached to it. The functions |
* below allow the core DRM code to control connectors, enumerate available modes, |
* etc. |
*/ |
struct drm_connector_funcs {
    /* Set power state (DPMS mode) on this connector */
    void (*dpms)(struct drm_connector *connector, int mode);
    /* Save connector state */
    void (*save)(struct drm_connector *connector);
    /* Restore connector state */
    void (*restore)(struct drm_connector *connector);
    /* Probe whether anything is attached to this connector */
    enum drm_connector_status (*detect)(struct drm_connector *connector);
    /* Fill the connector's mode list, constrained to the given limits */
    int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
    /* A property on this connector changed; apply @val */
    int (*set_property)(struct drm_connector *connector, struct drm_property *property,
                        uint64_t val);
    /* Object destroy routine */
    void (*destroy)(struct drm_connector *connector);
    /* Notify the driver that the connector state is being forced */
    void (*force)(struct drm_connector *connector);
};
/**
 * drm_encoder_funcs - driver callbacks for an encoder (destroy only)
 */
struct drm_encoder_funcs {
    /* Object destroy routine */
    void (*destroy)(struct drm_encoder *encoder);
};
#define DRM_CONNECTOR_MAX_UMODES 16 |
#define DRM_CONNECTOR_MAX_PROPERTY 16 |
#define DRM_CONNECTOR_LEN 32 |
#define DRM_CONNECTOR_MAX_ENCODER 2 |
/** |
* drm_encoder - central DRM encoder structure |
*/ |
/**
 * drm_encoder - central DRM encoder structure
 */
struct drm_encoder {
    struct drm_device *dev;      /* owning device */
    struct list_head head;       /* entry in mode_config encoder_list */
    struct drm_mode_object base; /* base object for ID lookup */
    int encoder_type;            /* DRM_MODE_ENCODER_* type code */
    uint32_t possible_crtcs;     /* bitmask of CRTCs this encoder can drive */
    uint32_t possible_clones;    /* bitmask of encoders it can be cloned with */
    struct drm_crtc *crtc;       /* CRTC currently feeding this encoder, or NULL */
    const struct drm_encoder_funcs *funcs; /* control callbacks */
    void *helper_private;        /* set via drm_encoder_helper_add() */
};
/* User/driver override of connector detection */
enum drm_connector_force {
    DRM_FORCE_UNSPECIFIED,      /* no override; use detection */
    DRM_FORCE_OFF,              /* treat connector as disconnected */
    DRM_FORCE_ON, /* force on analog part normally */
    DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
};
/** |
* drm_connector - central DRM connector control structure |
* @crtc: CRTC this connector is currently connected to, NULL if none |
* @interlace_allowed: can this connector handle interlaced modes? |
* @doublescan_allowed: can this connector handle doublescan? |
* @available_modes: modes available on this connector (from get_modes() + user) |
* @initial_x: initial x position for this connector |
* @initial_y: initial y position for this connector |
* @status: connector connected? |
* @funcs: connector control functions |
* |
* Each connector may be connected to one or more CRTCs, or may be clonable by |
* another connector if they can share a CRTC. Each connector also has a specific |
* position in the broader display (referred to as a 'screen' though it could |
* span multiple monitors). |
*/ |
struct drm_connector {
    struct drm_device *dev;      /* owning device */
    // struct device kdev;
    struct device_attribute *attr;
    struct list_head head;       /* entry in mode_config connector_list */
    struct drm_mode_object base; /* base object for ID lookup */
    int connector_type;          /* DRM_MODE_CONNECTOR_* type code */
    int connector_type_id;       /* index among connectors of the same type */
    bool interlace_allowed;      /* can handle interlaced modes? */
    bool doublescan_allowed;     /* can handle doublescan? */
    struct list_head modes; /* list of modes on this connector */
    int initial_x, initial_y;    /* initial position within the screen */
    enum drm_connector_status status; /* last detection result */
    /* these are modes added by probing with DDC or the BIOS */
    struct list_head probed_modes;
    struct drm_display_info display_info; /* monitor info (mostly from EDID) */
    const struct drm_connector_funcs *funcs; /* control callbacks */
    struct list_head user_modes; /* modes added by userspace */
    struct drm_property_blob *edid_blob_ptr; /* raw EDID exposed as a blob property */
    /* attached properties and their current values, parallel arrays */
    u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
    uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
    /* requested DPMS state */
    int dpms;
    void *helper_private;        /* set via drm_connector_helper_add() */
    /* forced on connector */
    enum drm_connector_force force;
    uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; /* candidate encoders */
    uint32_t force_encoder_id;   /* encoder to use when forced */
    struct drm_encoder *encoder; /* currently active encoder */
    void *fb_helper_private;     /* state owned by the fb helper layer */
};
/** |
* struct drm_mode_set |
* |
 * Represents a single crtc and the connectors that it drives, with what mode
 * and from which framebuffer it scans out.
* |
* This is used to set modes. |
*/ |
struct drm_mode_set {
    struct list_head head;       /* list linkage for the caller's use */
    struct drm_framebuffer *fb;  /* framebuffer to scan out, or NULL to disable */
    struct drm_crtc *crtc;       /* CRTC being configured */
    struct drm_display_mode *mode; /* mode to program */
    uint32_t x;                  /* scanout x offset within @fb */
    uint32_t y;                  /* scanout y offset within @fb */
    struct drm_connector **connectors; /* connectors this CRTC should drive */
    size_t num_connectors;       /* length of @connectors */
};
/** |
* struct drm_mode_config_funcs - configure CRTCs for a given screen layout |
 * @fb_create: create a driver framebuffer object from a userspace fb command
 * @fb_changed: notify the driver that the framebuffer configuration changed
 *
 * Entry points the core uses for driver-specific mode configuration
 * handling, currently limited to framebuffer creation and change
 * notification.
*/ |
struct drm_mode_config_funcs {
    /* Create a driver framebuffer object from a userspace fb command */
    struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
    /* Framebuffer set changed; semantics are driver-defined — confirm per driver */
    int (*fb_changed)(struct drm_device *dev);
};
/* A grouping of mode objects, used when reporting resources to userspace
 * (see drm_mode_group_init_legacy_group) */
struct drm_mode_group {
    uint32_t num_crtcs;      /* count of CRTC IDs in @id_list */
    uint32_t num_encoders;   /* count of encoder IDs following the CRTCs */
    uint32_t num_connectors; /* count of connector IDs following the encoders */
    /* list of object IDs for this group */
    uint32_t *id_list;
};
/** |
* drm_mode_config - Mode configuration control structure |
* |
*/ |
struct drm_mode_config {
    // struct mutex mutex; /* protects configuration (mode lists etc.) */
    // struct mutex idr_mutex; /* for IDR management */
    struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
    /* this is limited to one for now */
    int num_fb;                  /* number of entries in fb_list */
    struct list_head fb_list;
    int num_connector;           /* number of entries in connector_list */
    struct list_head connector_list;
    int num_encoder;             /* number of entries in encoder_list */
    struct list_head encoder_list;
    int num_crtc;                /* number of entries in crtc_list */
    struct list_head crtc_list;
    struct list_head property_list;
    /* in-kernel framebuffers - hung of filp_head in drm_framebuffer */
    struct list_head fb_kernel_list;
    int min_width, min_height;   /* framebuffer size limits */
    int max_width, max_height;
    struct drm_mode_config_funcs *funcs; /* driver mode-config callbacks */
    resource_size_t fb_base;     /* base address for framebuffers */
    /* pointers to standard properties */
    struct list_head property_blob_list;
    struct drm_property *edid_property;
    struct drm_property *dpms_property;
    /* DVI-I properties */
    struct drm_property *dvi_i_subconnector_property;
    struct drm_property *dvi_i_select_subconnector_property;
    /* TV properties */
    struct drm_property *tv_subconnector_property;
    struct drm_property *tv_select_subconnector_property;
    struct drm_property *tv_mode_property;
    struct drm_property *tv_left_margin_property;
    struct drm_property *tv_right_margin_property;
    struct drm_property *tv_top_margin_property;
    struct drm_property *tv_bottom_margin_property;
    struct drm_property *tv_brightness_property;
    struct drm_property *tv_contrast_property;
    struct drm_property *tv_flicker_reduction_property;
    struct drm_property *tv_overscan_property;
    struct drm_property *tv_saturation_property;
    struct drm_property *tv_hue_property;
    /* Optional properties */
    struct drm_property *scaling_mode_property;
    struct drm_property *dithering_mode_property;
    struct drm_property *dirty_info_property;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
#define obj_to_connector(x) container_of(x, struct drm_connector, base) |
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) |
#define obj_to_mode(x) container_of(x, struct drm_display_mode, base) |
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) |
#define obj_to_property(x) container_of(x, struct drm_property, base) |
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) |
extern void drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
extern void drm_crtc_cleanup(struct drm_crtc *crtc); |
extern void drm_connector_init(struct drm_device *dev, |
struct drm_connector *connector, |
const struct drm_connector_funcs *funcs, |
int connector_type); |
extern void drm_connector_cleanup(struct drm_connector *connector); |
extern void drm_encoder_init(struct drm_device *dev, |
struct drm_encoder *encoder, |
const struct drm_encoder_funcs *funcs, |
int encoder_type); |
extern void drm_encoder_cleanup(struct drm_encoder *encoder); |
extern char *drm_get_connector_name(struct drm_connector *connector); |
extern char *drm_get_dpms_name(int val); |
extern char *drm_get_dvi_i_subconnector_name(int val); |
extern char *drm_get_dvi_i_select_name(int val); |
extern char *drm_get_tv_subconnector_name(int val); |
extern char *drm_get_tv_select_name(int val); |
//extern void drm_fb_release(struct drm_file *file_priv); |
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); |
extern struct edid *drm_get_edid(struct drm_connector *connector, |
struct i2c_adapter *adapter); |
//extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, |
// unsigned char *buf, int len); |
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); |
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); |
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
struct drm_display_mode *mode); |
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); |
extern void drm_mode_config_init(struct drm_device *dev); |
extern void drm_mode_config_cleanup(struct drm_device *dev); |
extern void drm_mode_set_name(struct drm_display_mode *mode); |
extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2); |
extern int drm_mode_width(struct drm_display_mode *mode); |
extern int drm_mode_height(struct drm_display_mode *mode); |
/* for use by fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev, |
struct drm_crtc *crtc, |
struct drm_display_mode *mode); |
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode); |
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); |
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); |
extern void drm_mode_list_concat(struct list_head *head, |
struct list_head *new); |
extern void drm_mode_validate_size(struct drm_device *dev, |
struct list_head *mode_list, |
int maxX, int maxY, int maxPitch); |
extern void drm_mode_prune_invalid(struct drm_device *dev, |
struct list_head *mode_list, bool verbose); |
extern void drm_mode_sort(struct list_head *mode_list); |
extern int drm_mode_hsync(struct drm_display_mode *mode); |
extern int drm_mode_vrefresh(struct drm_display_mode *mode); |
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, |
int adjust_flags); |
extern void drm_mode_connector_list_update(struct drm_connector *connector); |
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
struct edid *edid); |
extern int drm_connector_property_set_value(struct drm_connector *connector, |
struct drm_property *property, |
uint64_t value); |
extern int drm_connector_property_get_value(struct drm_connector *connector, |
struct drm_property *property, |
uint64_t *value); |
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev); |
extern void drm_framebuffer_set_object(struct drm_device *dev, |
unsigned long handle); |
extern int drm_framebuffer_init(struct drm_device *dev, |
struct drm_framebuffer *fb, |
const struct drm_framebuffer_funcs *funcs); |
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); |
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc); |
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); |
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); |
extern bool drm_crtc_in_use(struct drm_crtc *crtc); |
extern int drm_connector_attach_property(struct drm_connector *connector, |
struct drm_property *property, uint64_t init_val); |
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags, |
const char *name, int num_values); |
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); |
extern int drm_property_add_enum(struct drm_property *property, int index, |
uint64_t value, const char *name); |
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); |
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, |
char *formats[]); |
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
extern int drm_mode_create_dithering_property(struct drm_device *dev); |
extern int drm_mode_create_dirty_info_property(struct drm_device *dev); |
extern char *drm_get_encoder_name(struct drm_encoder *encoder); |
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
int gamma_size); |
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
uint32_t id, uint32_t type); |
/* IOCTLs */ |
extern int drm_mode_getresources(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getcrtc(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getconnector(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_setcrtc(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_cursor_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_addfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_rmfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_addmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_rmmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_attachmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_detachmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getproperty_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getblob_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_hotplug_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_replacefb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getencoder(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern bool drm_detect_hdmi_monitor(struct edid *edid); |
extern int drm_mode_page_flip_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool reduced, bool interlaced, bool margins); |
extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool interlaced, int margins); |
extern int drm_add_modes_noedid(struct drm_connector *connector, |
int hdisplay, int vdisplay); |
#endif /* __DRM_CRTC_H__ */ |
/drivers/include/drm/drm_crtc_helper.h |
---|
0,0 → 1,134 |
/* |
* Copyright © 2006 Keith Packard |
* Copyright © 2007-2008 Dave Airlie |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
/* |
* The DRM mode setting helper functions are common code for drivers to use if |
* they wish. Drivers are not forced to use this code in their |
 * implementations but it would be useful if the code they do use at least
* provides a consistent interface and operation to userspace |
*/ |
#ifndef __DRM_CRTC_HELPER_H__ |
#define __DRM_CRTC_HELPER_H__ |
//#include <linux/spinlock.h> |
#include <linux/types.h> |
//#include <linux/idr.h> |
#include <linux/fb.h> |
#include "drm_fb_helper.h" |
/**
 * drm_crtc_helper_funcs - per-CRTC hooks used by the mode setting helpers
 */
struct drm_crtc_helper_funcs {
    /*
     * Control power levels on the CRTC. If the mode passed in is
     * unsupported, the provider must use the next lowest power level.
     */
    void (*dpms)(struct drm_crtc *crtc, int mode);
    /* Called before a mode set begins (typically to turn things off) */
    void (*prepare)(struct drm_crtc *crtc);
    /* Called after a mode set completes (typically to turn things back on) */
    void (*commit)(struct drm_crtc *crtc);
    /* Provider can fixup or change mode timings before modeset occurs */
    bool (*mode_fixup)(struct drm_crtc *crtc,
                       struct drm_display_mode *mode,
                       struct drm_display_mode *adjusted_mode);
    /* Actually set the mode */
    int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
                    struct drm_display_mode *adjusted_mode, int x, int y,
                    struct drm_framebuffer *old_fb);
    /* Move the crtc on the current fb to the given position *optional* */
    int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
                         struct drm_framebuffer *old_fb);
    /* reload the current crtc LUT */
    void (*load_lut)(struct drm_crtc *crtc);
};
/**
 * drm_encoder_helper_funcs - per-encoder hooks used by the mode setting helpers
 */
struct drm_encoder_helper_funcs {
    /* Set power state (DPMS mode) on the encoder */
    void (*dpms)(struct drm_encoder *encoder, int mode);
    /* Save encoder state */
    void (*save)(struct drm_encoder *encoder);
    /* Restore encoder state */
    void (*restore)(struct drm_encoder *encoder);
    /* Fixup or reject mode timings before the modeset */
    bool (*mode_fixup)(struct drm_encoder *encoder,
                       struct drm_display_mode *mode,
                       struct drm_display_mode *adjusted_mode);
    /* Called before/after programming the mode */
    void (*prepare)(struct drm_encoder *encoder);
    void (*commit)(struct drm_encoder *encoder);
    /* Program the (already adjusted) mode on the encoder */
    void (*mode_set)(struct drm_encoder *encoder,
                     struct drm_display_mode *mode,
                     struct drm_display_mode *adjusted_mode);
    /* Return the CRTC this encoder is attached to */
    struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
    /* detect for DAC style encoders */
    enum drm_connector_status (*detect)(struct drm_encoder *encoder,
                                        struct drm_connector *connector);
    /* disable encoder when not in use - more explicit than dpms off */
    void (*disable)(struct drm_encoder *encoder);
};
/**
 * drm_connector_helper_funcs - per-connector hooks used by the helpers
 */
struct drm_connector_helper_funcs {
    /* Fill the connector's probed mode list; returns count of modes added */
    int (*get_modes)(struct drm_connector *connector);
    /* Validate a single mode against connector limits */
    int (*mode_valid)(struct drm_connector *connector,
                      struct drm_display_mode *mode);
    /* Pick the best encoder for this connector */
    struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
};
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); |
extern void drm_helper_disable_unused_functions(struct drm_device *dev); |
extern int drm_helper_hotplug_stage_two(struct drm_device *dev); |
extern bool drm_helper_initial_config(struct drm_device *dev); |
extern int drm_crtc_helper_set_config(struct drm_mode_set *set); |
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
int x, int y, |
struct drm_framebuffer *old_fb); |
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); |
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); |
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); |
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, |
struct drm_mode_fb_cmd *mode_cmd); |
static inline void drm_crtc_helper_add(struct drm_crtc *crtc, |
const struct drm_crtc_helper_funcs *funcs) |
{ |
crtc->helper_private = (void *)funcs; |
} |
static inline void drm_encoder_helper_add(struct drm_encoder *encoder, |
const struct drm_encoder_helper_funcs *funcs) |
{ |
encoder->helper_private = (void *)funcs; |
} |
static inline int drm_connector_helper_add(struct drm_connector *connector, |
const struct drm_connector_helper_funcs *funcs) |
{ |
connector->helper_private = (void *)funcs; |
return drm_fb_helper_add_connector(connector); |
} |
extern int drm_helper_resume_force_mode(struct drm_device *dev); |
#endif |
/drivers/include/drm/drm_dp_helper.h |
---|
0,0 → 1,180 |
/* |
* Copyright © 2008 Keith Packard |
* |
* Permission to use, copy, modify, distribute, and sell this software and its |
* documentation for any purpose is hereby granted without fee, provided that |
* the above copyright notice appear in all copies and that both that copyright |
* notice and this permission notice appear in supporting documentation, and |
* that the name of the copyright holders not be used in advertising or |
* publicity pertaining to distribution of the software without specific, |
* written prior permission. The copyright holders make no representations |
* about the suitability of this software for any purpose. It is provided "as |
* is" without express or implied warranty. |
* |
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, |
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO |
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR |
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, |
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE |
* OF THIS SOFTWARE. |
*/ |
#ifndef _DRM_DP_HELPER_H_ |
#define _DRM_DP_HELPER_H_ |
/* From the VESA DisplayPort spec */ |
#define AUX_NATIVE_WRITE 0x8 |
#define AUX_NATIVE_READ 0x9 |
#define AUX_I2C_WRITE 0x0 |
#define AUX_I2C_READ 0x1 |
#define AUX_I2C_STATUS 0x2 |
#define AUX_I2C_MOT 0x4 |
#define AUX_NATIVE_REPLY_ACK (0x0 << 4) |
#define AUX_NATIVE_REPLY_NACK (0x1 << 4) |
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) |
#define AUX_NATIVE_REPLY_MASK (0x3 << 4) |
#define AUX_I2C_REPLY_ACK (0x0 << 6) |
#define AUX_I2C_REPLY_NACK (0x1 << 6) |
#define AUX_I2C_REPLY_DEFER (0x2 << 6) |
#define AUX_I2C_REPLY_MASK (0x3 << 6) |
/* AUX CH addresses */ |
/* DPCD */ |
#define DP_DPCD_REV 0x000 |
#define DP_MAX_LINK_RATE 0x001 |
#define DP_MAX_LANE_COUNT 0x002 |
# define DP_MAX_LANE_COUNT_MASK 0x1f |
# define DP_ENHANCED_FRAME_CAP (1 << 7) |
#define DP_MAX_DOWNSPREAD 0x003 |
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) |
#define DP_NORP 0x004 |
#define DP_DOWNSTREAMPORT_PRESENT 0x005 |
# define DP_DWN_STRM_PORT_PRESENT (1 << 0) |
# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 |
/* 00b = DisplayPort */ |
/* 01b = Analog */ |
/* 10b = TMDS or HDMI */ |
/* 11b = Other */ |
# define DP_FORMAT_CONVERSION (1 << 3) |
#define DP_MAIN_LINK_CHANNEL_CODING 0x006 |
/* link configuration */ |
#define DP_LINK_BW_SET 0x100 |
# define DP_LINK_BW_1_62 0x06 |
# define DP_LINK_BW_2_7 0x0a |
#define DP_LANE_COUNT_SET 0x101 |
# define DP_LANE_COUNT_MASK 0x0f |
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) |
#define DP_TRAINING_PATTERN_SET 0x102 |
# define DP_TRAINING_PATTERN_DISABLE 0 |
# define DP_TRAINING_PATTERN_1 1 |
# define DP_TRAINING_PATTERN_2 2 |
# define DP_TRAINING_PATTERN_MASK 0x3 |
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) |
# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) |
# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) |
# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) |
# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) |
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) |
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) |
# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) |
# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) |
# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) |
# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) |
#define DP_TRAINING_LANE0_SET 0x103 |
#define DP_TRAINING_LANE1_SET 0x104 |
#define DP_TRAINING_LANE2_SET 0x105 |
#define DP_TRAINING_LANE3_SET 0x106 |
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 |
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 |
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) |
# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) |
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 |
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) |
#define DP_DOWNSPREAD_CTRL 0x107 |
# define DP_SPREAD_AMP_0_5 (1 << 4) |
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 |
# define DP_SET_ANSI_8B10B (1 << 0) |
#define DP_LANE0_1_STATUS 0x202 |
#define DP_LANE2_3_STATUS 0x203 |
# define DP_LANE_CR_DONE (1 << 0) |
# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) |
# define DP_LANE_SYMBOL_LOCKED (1 << 2) |
#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ |
DP_LANE_CHANNEL_EQ_DONE | \ |
DP_LANE_SYMBOL_LOCKED) |
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 |
#define DP_INTERLANE_ALIGN_DONE (1 << 0) |
#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) |
#define DP_LINK_STATUS_UPDATED (1 << 7) |
#define DP_SINK_STATUS 0x205 |
#define DP_RECEIVE_PORT_0_STATUS (1 << 0) |
#define DP_RECEIVE_PORT_1_STATUS (1 << 1) |
#define DP_ADJUST_REQUEST_LANE0_1 0x206 |
#define DP_ADJUST_REQUEST_LANE2_3 0x207 |
# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 |
# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 |
# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c |
# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 |
# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 |
# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 |
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 |
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 |
#define DP_SET_POWER 0x600 |
# define DP_SET_POWER_D0 0x1 |
# define DP_SET_POWER_D3 0x2 |
#define MODE_I2C_START 1 |
#define MODE_I2C_WRITE 2 |
#define MODE_I2C_READ 4 |
#define MODE_I2C_STOP 8 |
/**
 * i2c_algo_dp_aux_data - per-adapter state for I2C-over-DP-AUX transfers
 */
struct i2c_algo_dp_aux_data {
    bool running;   /* an I2C transaction is in progress */
    u16 address;    /* current I2C slave address */
    /* Driver hook performing one AUX channel operation (MODE_I2C_* above) */
    int (*aux_ch) (struct i2c_adapter *adapter,
                   int mode, uint8_t write_byte,
                   uint8_t *read_byte);
};
int |
i2c_dp_aux_add_bus(struct i2c_adapter *adapter); |
#endif /* _DRM_DP_HELPER_H_ */ |
/drivers/include/drm/drm_edid.h |
---|
0,0 → 1,204 |
/* |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef __DRM_EDID_H__ |
#define __DRM_EDID_H__ |
#include <linux/types.h> |
/* Size in bytes of one EDID block (base block or extension block). */
#define EDID_LENGTH 128
/* i2c slave address used for DDC/EDID reads. */
#define DDC_ADDR 0x50
/* Established timings: t1/t2 are bitmaps of legacy modes, plus a
 * manufacturer-reserved byte. */
struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
} __attribute__((packed));
/* Fields packed into std_timing.vfreq_aspect: */
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
/* Standard timing descriptor (2 bytes). */
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect; /* see EDID_TIMING_* masks above */
} __attribute__((packed));
/* Flag bits for a detailed (pixel) timing descriptor — NOTE(review):
 * presumably stored in detailed_pixel_timing.misc; confirm against the
 * EDID spec. */
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
/* Raw on-the-wire layout: most values are split into a low byte (*_lo)
 * with the upper bits packed into a shared *_hi byte. */
struct detailed_pixel_timing {
u8 hactive_lo;
u8 hblank_lo;
u8 hactive_hblank_hi; /* upper bits of hactive / hblank */
u8 vactive_lo;
u8 vblank_lo;
u8 vactive_vblank_hi; /* upper bits of vactive / vblank */
u8 hsync_offset_lo;
u8 hsync_pulse_width_lo;
u8 vsync_offset_pulse_width_lo;
u8 hsync_vsync_offset_pulse_width_hi;
u8 width_mm_lo; /* physical image size in mm */
u8 height_mm_lo;
u8 width_height_mm_hi; /* upper bits of width/height */
u8 hborder;
u8 vborder;
u8 misc;
} __attribute__((packed));
/* If it's not pixel timing, it'll be one of the below */
/* Monitor name / serial / generic ASCII string payload (13 bytes). */
struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
/* Monitor operating limits (EDID_DETAIL_MONITOR_RANGE payload). */
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
__le16 sec_gtf_toggle; /* A000=use above, 20=use below */
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed));
/* Additional white-point data. */
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
} __attribute__((packed));
/* Two extra color-point entries (index + 3-byte payload each). */
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
} __attribute__((packed));
/* One 3-byte CVT timing code. */
struct cvt_timing {
u8 code[3];
} __attribute__((packed));
/* Non-pixel detailed descriptor: 'type' selects which union member the
 * 'data' bytes should be interpreted as. */
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
fb=color point data, fa=standard timing data,
f9=undefined, f8=mfg. reserved */
u8 pad2;
union {
struct detailed_data_string str;
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[5];
struct cvt_timing cvt[4];
} data;
} __attribute__((packed));
/* Values for detailed_non_pixel.type (descriptor tags). */
#define EDID_DETAIL_EST_TIMINGS 0xf7
#define EDID_DETAIL_CVT_3BYTE 0xf8
#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
#define EDID_DETAIL_MONITOR_RANGE 0xfd
#define EDID_DETAIL_MONITOR_STRING 0xfe
#define EDID_DETAIL_MONITOR_SERIAL 0xff
/* One 18-byte detailed descriptor: a pixel timing when pixel_clock is
 * non-zero, otherwise one of the non-pixel payloads above. */
struct detailed_timing {
__le16 pixel_clock; /* need to multiply by 10 KHz */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
} data;
} __attribute__((packed));
/* Bits of edid.input (analog sync capabilities / digital flag). */
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
/* Bits of edid.features. */
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
/*
 * EDID 1.x base block — exactly EDID_LENGTH (128) bytes, matching the
 * on-the-wire layout byte for byte (hence packed).
 */
struct edid {
u8 header[8]; /* magic header pattern (per EDID spec) */
/* Vendor & product info */
u8 mfg_id[2];
u8 prod_code[2]; /* little-endian; see EDID_PRODUCT_ID() */
u32 serial; /* FIXME: byte order */
u8 mfg_week;
u8 mfg_year;
/* EDID version */
u8 version;
u8 revision;
/* Display info: */
u8 input; /* DRM_EDID_INPUT_* bits */
u8 width_cm;
u8 height_cm;
u8 gamma;
u8 features; /* DRM_EDID_FEATURE_* bits */
/* Color characteristics */
u8 red_green_lo;
u8 black_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
u8 green_y;
u8 blue_x;
u8 blue_y;
u8 white_x;
u8 white_y;
/* Est. timings and mfg rsvd timings*/
struct est_timings established_timings;
/* Standard timings 1-8*/
struct std_timing standard_timings[8];
/* Detailing timings 1-4 */
struct detailed_timing detailed_timings[4];
/* Number of 128 byte ext. blocks */
u8 extensions;
/* Checksum */
u8 checksum;
} __attribute__((packed));
/* 16-bit product id assembled little-endian from the two prod_code bytes. */
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
#endif /* __DRM_EDID_H__ */ |
/drivers/include/drm/drm_fb_helper.h |
---|
0,0 → 1,111 |
/* |
* Copyright (c) 2006-2009 Red Hat Inc. |
* Copyright (c) 2006-2008 Intel Corporation |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* |
* DRM framebuffer helper functions |
* |
* Permission to use, copy, modify, distribute, and sell this software and its |
* documentation for any purpose is hereby granted without fee, provided that |
* the above copyright notice appear in all copies and that both that copyright |
* notice and this permission notice appear in supporting documentation, and |
* that the name of the copyright holders not be used in advertising or |
* publicity pertaining to distribution of the software without specific, |
* written prior permission. The copyright holders make no representations |
* about the suitability of this software for any purpose. It is provided "as |
* is" without express or implied warranty. |
* |
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, |
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO |
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR |
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, |
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE |
* OF THIS SOFTWARE. |
* |
* Authors: |
* Dave Airlie <airlied@linux.ie> |
* Jesse Barnes <jesse.barnes@intel.com> |
*/ |
#ifndef DRM_FB_HELPER_H |
#define DRM_FB_HELPER_H |
/* Per-CRTC state tracked by the fb helper: the CRTC id plus the mode-set
 * used to light it up. */
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
};
/* Driver-supplied hooks for gamma (palette) access on a CRTC. */
struct drm_fb_helper_funcs {
void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
};
/* mode specified on the command line */
struct drm_fb_helper_cmdline_mode {
bool specified; /* a mode was given at all */
bool refresh_specified;
bool bpp_specified;
int xres, yres;
int bpp;
int refresh;
bool rb; /* reduced blanking requested */
bool interlace;
bool cvt;
bool margins;
};
/* Per-connector fb-helper state (currently just the cmdline mode). */
struct drm_fb_helper_connector {
struct drm_fb_helper_cmdline_mode cmdline_mode;
};
/* Top-level fbdev emulation state for one framebuffer. */
struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
struct drm_display_mode *mode;
int crtc_count; /* number of entries in crtc_info */
struct drm_fb_helper_crtc *crtc_info;
struct drm_fb_helper_funcs *funcs;
int conn_limit; /* NOTE(review): presumably max connectors per CRTC; confirm */
struct list_head kernel_fb_list; /* link in global list of kernel fbs */
};
/* Probe outputs and create a single framebuffer covering them, using the
 * driver-supplied fb_create() callback to allocate the actual fb. */
int drm_fb_helper_single_fb_probe(struct drm_device *dev,
int preferred_bpp,
int (*fb_create)(struct drm_device *dev,
uint32_t fb_width,
uint32_t fb_height,
uint32_t surface_width,
uint32_t surface_height,
uint32_t surface_depth,
uint32_t surface_bpp,
struct drm_framebuffer **fb_ptr));
/* Allocate the per-CRTC arrays of @helper for @crtc_count CRTCs. */
int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
int max_conn);
/* Release everything allocated by drm_fb_helper_init_crtc_count(). */
void drm_fb_helper_free(struct drm_fb_helper *helper);
/* fbdev-style entry points (blank/pan/par/var/color handling). */
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
int drm_fb_helper_set_par(struct fb_info *info);
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
int drm_fb_helper_setcolreg(unsigned regno,
unsigned red,
unsigned green,
unsigned blue,
unsigned transp,
struct fb_info *info);
/* Restore the kernel console fb configuration. */
void drm_fb_helper_restore(void);
/* Fill in the fb_info variable/fixed screen info from a framebuffer. */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
/* Register @connector with the fb helper. */
int drm_fb_helper_add_connector(struct drm_connector *connector);
/* Parse video= style mode options from the kernel command line. */
int drm_fb_helper_parse_command_line(struct drm_device *dev);
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
#endif |
/drivers/include/drm/drm_hashtab.h |
---|
0,0 → 1,69 |
/************************************************************************** |
* |
* Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
* |
**************************************************************************/ |
/* |
* Simple open hash tab implementation. |
* |
* Authors: |
* Thomas Hellström <thomas-at-tungstengraphics-dot-com> |
*/ |
#ifndef DRM_HASHTAB_H |
#define DRM_HASHTAB_H |
#include <linux/list.h> |
/* Get the object embedding a drm_hash_item (container_of convenience). */
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
/* Entry to embed in hashed objects; chained via head, looked up by key. */
struct drm_hash_item {
struct hlist_node head;
unsigned long key;
};
/* Chained hash table with a power-of-two bucket array. */
struct drm_open_hash {
unsigned int size; /* number of buckets — NOTE(review): presumably 1 << order; confirm in drm_hashtab.c */
unsigned int order; /* log2 of the table size passed to drm_ht_create() */
unsigned int fill; /* NOTE(review): presumably current item count; confirm */
struct hlist_head *table; /* bucket array */
int use_vmalloc; /* nonzero if table came from vmalloc rather than kmalloc */
};
/* Create / destroy a table of 1 << order buckets. */
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
/* Insert @item under its pre-set key. */
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
/* Insert @item, picking an unused key derived from @seed within the
 * (bits, shift, add) range; the chosen key is stored in the item. */
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add);
/* Look up the item stored under @key; returns it through @item. */
extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
/* Debug: print the chain holding @key. */
extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
/* Remove by key or by item pointer. */
extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
/* Free the bucket array (items themselves are owned by the caller). */
extern void drm_ht_remove(struct drm_open_hash *ht);
#endif |
/drivers/include/drm/drm_mm.h |
---|
0,0 → 1,141 |
/************************************************************************** |
* |
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
* |
**************************************************************************/ |
/* |
* Authors: |
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
*/ |
#ifndef _DRM_MM_H_ |
#define _DRM_MM_H_ |
/* |
* Generic range manager structs |
*/ |
#include <linux/list.h> |
#ifdef CONFIG_DEBUG_FS |
#include <linux/seq_file.h> |
#endif |
/* One range in the allocator: a free hole when free != 0, otherwise an
 * allocated block. */
struct drm_mm_node {
struct list_head fl_entry; /* link in the free list */
struct list_head ml_entry; /* link in the list of all nodes */
int free; /* nonzero: this node is an unallocated hole */
unsigned long start; /* offset of the range */
unsigned long size; /* length of the range */
struct drm_mm *mm; /* owning allocator (see drm_get_mm()) */
void *private; /* opaque pointer for the allocator's user */
};
/* Range-manager state: free list, full node list, and a spare-node pool
 * used for atomic allocations (see drm_mm_pre_get()). */
struct drm_mm {
struct list_head fl_entry; /* head of the free list */
struct list_head ml_entry; /* head of the all-nodes list */
struct list_head unused_nodes; /* preallocated spare nodes */
int num_unused; /* count of spare nodes */
spinlock_t unused_lock; /* protects unused_nodes / num_unused */
};
/*
 * Basic range manager support (drm_mm.c)
 */
/* Carve @size bytes aligned to @alignment out of free node @node.
 * @atomic: nonzero to draw spare nodes instead of allocating
 * (NOTE(review): confirm exact semantics in drm_mm.c). */
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic);
/* As above, but the allocation must also fall inside [start, end). */
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int atomic);
/*
 * drm_mm_get_block - allocate @size bytes from free node @parent.
 * Convenience wrapper: drm_mm_get_block_generic() with the atomic flag
 * cleared.
 */
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
{
	const int atomic = 0;

	return drm_mm_get_block_generic(parent, size, alignment, atomic);
}
/*
 * drm_mm_get_block_atomic - allocate @size bytes from free node @parent.
 * Convenience wrapper: drm_mm_get_block_generic() with the atomic flag
 * set.
 */
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
							  unsigned long size,
							  unsigned alignment)
{
	const int atomic = 1;

	return drm_mm_get_block_generic(parent, size, alignment, atomic);
}
/*
 * drm_mm_get_block_range - allocate @size bytes from @parent, constrained
 * to [start, end).  Convenience wrapper over
 * drm_mm_get_block_range_generic() with the atomic flag cleared.
 */
static inline struct drm_mm_node *drm_mm_get_block_range(
	struct drm_mm_node *parent,
	unsigned long size,
	unsigned alignment,
	unsigned long start,
	unsigned long end)
{
	const int atomic = 0;

	return drm_mm_get_block_range_generic(parent, size, alignment,
					      start, end, atomic);
}
/*
 * drm_mm_get_block_atomic_range - allocate @size bytes from @parent,
 * constrained to [start, end).  Convenience wrapper over
 * drm_mm_get_block_range_generic() with the atomic flag set.
 */
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
	struct drm_mm_node *parent,
	unsigned long size,
	unsigned alignment,
	unsigned long start,
	unsigned long end)
{
	const int atomic = 1;

	return drm_mm_get_block_range_generic(parent, size, alignment,
					      start, end, atomic);
}
/* Return @cur to the allocator, merging with neighbouring free space. */
extern void drm_mm_put_block(struct drm_mm_node *cur);
/* Find a free node big enough for a (size, alignment) allocation;
 * @best_match: prefer the smallest fitting hole over the first one. */
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
/* As drm_mm_search_free(), restricted to holes inside [start, end). */
extern struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int best_match);
/* Initialize @mm to manage the range [start, start + size). */
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
/* Tear down an allocator (all blocks should have been freed). */
extern void drm_mm_takedown(struct drm_mm *mm);
/* Nonzero if the allocator holds no allocated blocks. */
extern int drm_mm_clean(struct drm_mm *mm);
/* Query/shrink/grow the free space at the end of the managed range. */
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
unsigned long size, int atomic);
/* Refill the spare-node pool so later atomic allocations cannot fail
 * for lack of memory. */
extern int drm_mm_pre_get(struct drm_mm *mm);
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) |
{ |
return block->mm; |
} |
/* Debug dump of the allocator's node lists, each line tagged @prefix. */
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
/* Same information exposed through a debugfs seq_file. */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
#endif |
/drivers/include/drm/drm_mode.h |
---|
0,0 → 1,346 |
/* |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com> |
* Copyright (c) 2008 Red Hat Inc. |
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA |
* Copyright (c) 2007-2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef _DRM_MODE_H |
#define _DRM_MODE_H |
/* Fixed buffer sizes for names embedded in the structs below. */
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
/* Mode type bits (drm_mode_modeinfo.type). */
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
/* Full timing description of one display mode. */
struct drm_mode_modeinfo {
__u32 clock; /* pixel clock — NOTE(review): upstream uses kHz; confirm */
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 vrefresh;
__u32 flags; /* DRM_MODE_FLAG_* */
__u32 type; /* DRM_MODE_TYPE_* */
char name[DRM_DISPLAY_MODE_LEN];
};
/* GETRESOURCES-style reply: pointers to id arrays plus their counts and
 * the device's framebuffer size limits. */
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
/* CRTC configuration: which fb is scanned out, from where, with what mode. */
struct drm_mode_crtc {
__u64 set_connectors_ptr; /* array of connector ids to drive */
__u32 count_connectors;
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
__u32 x, y; /**< Position on the framebuffer */
__u32 gamma_size;
__u32 mode_valid; /* nonzero when 'mode' below is meaningful */
struct drm_mode_modeinfo mode;
};
/* Encoder types (drm_mode_get_encoder.encoder_type). */
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
/* GETENCODER reply: type, the CRTC currently feeding it, and bitmasks of
 * CRTCs/encoders it can be combined with. */
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
__u32 crtc_id; /**< Id of crtc */
__u32 possible_crtcs; /* bitmask of usable CRTCs */
__u32 possible_clones; /* bitmask of encoders usable for cloning */
};
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
/* Connector types (drm_mode_get_connector.connector_type). */
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
/* GETCONNECTOR reply: arrays of supported modes, properties and usable
 * encoders, plus current connection state and physical size. */
struct drm_mode_get_connector {
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr; /* parallel to props_ptr */
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type; /* DRM_MODE_CONNECTOR_* */
__u32 connector_type_id; /* instance number within that type */
__u32 connection; /* connected / disconnected state */
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
};
/* Property flags (drm_mode_get_property.flags). */
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
/* One named value of an enum property. */
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
/* GETPROPERTY reply: the property's metadata and its value/enum arrays. */
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
__u32 prop_id;
__u32 flags; /* DRM_MODE_PROP_* */
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
__u32 count_enum_blobs;
};
/* Set one property on a connector. */
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
/* Fetch a property blob: @data points at a buffer of @length bytes. */
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
/* Framebuffer creation/query parameters. */
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch; /* bytes per scanline */
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
/* Flags for drm_mode_fb_dirty_cmd.flags. */
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
/*
 * Mark a region of a framebuffer as dirty.
 *
 * Some hardware does not automatically update display contents
 * on a hardware or software draw to a framebuffer. This ioctl
 * allows userspace to tell the kernel and the hardware what
 * regions of the framebuffer have changed.
 *
 * The kernel or hardware is free to update more than just the
 * region specified by the clip rects. The kernel or hardware
 * may also delay and/or coalesce several calls to dirty into a
 * single update.
 *
 * Userspace may annotate the updates, the annotates are a
 * promise made by the caller that the change is either a copy
 * of pixels or a fill of a single color in the region specified.
 *
 * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
 * the number of updated regions are half of num_clips given,
 * where the clip rects are paired in src and dst. The width and
 * height of each one of the pairs must match.
 *
 * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
 * promises that the region specified of the clip rects is filled
 * completely with a single color as given in the color argument.
 */
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags; /* DRM_MODE_FB_DIRTY_* */
__u32 color; /* fill color, used with ANNOTATE_FILL */
__u32 num_clips;
__u64 clips_ptr; /* array of clip rects */
};
/* Attach/detach a mode on a connector. */
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
/* Flags for drm_mode_cursor.flags. */
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
/*
 * depending on the value in flags different members are used.
 *
 * CURSOR_BO uses
 *    crtc
 *    width
 *    height
 *    handle - if 0 turns the cursor off
 *
 * CURSOR_MOVE uses
 *    crtc
 *    x
 *    y
 */
struct drm_mode_cursor {
__u32 flags; /* DRM_MODE_CURSOR_* */
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
/* Set/get a CRTC's gamma ramp: three arrays of gamma_size entries. */
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
/*
 * Request a page flip on the specified crtc.
 *
 * This ioctl will ask KMS to schedule a page flip for the specified
 * crtc.  Once any pending rendering targeting the specified fb (as of
 * ioctl time) has completed, the crtc will be reprogrammed to display
 * that fb after the next vertical refresh.  The ioctl returns
 * immediately, but subsequent rendering to the current fb will block
 * in the execbuffer ioctl until the page flip happens.  If a page
 * flip is already pending as the ioctl is called, EBUSY will be
 * returned.
 *
 * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
 * request that drm sends back a vblank event (see drm.h: struct
 * drm_event_vblank) when the page flip is done.  The user_data field
 * passed in with this ioctl will be returned as the user_data field
 * in the vblank event struct.
 *
 * The reserved field must be zero until we figure out something
 * clever to use it for.
 */
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id; /* fb to flip to */
__u32 flags; /* DRM_MODE_PAGE_FLIP_* */
__u32 reserved; /* must be zero */
__u64 user_data; /* returned in the vblank event */
};
#endif |
/drivers/include/drm/drm_pciids.h |
---|
0,0 → 1,380 |
/* |
This file is auto-generated from the drm_pciids.txt in the DRM CVS |
Please contact dri-devel@lists.sf.net to add new cards to this list |
*/ |
/*
 * PCI device-ID table for the radeon driver, one initializer per
 * supported device.  Each entry is
 *   {vendor, device, subvendor, subdevice, class, class_mask, driver_data}
 * (apparently the usual Linux struct pci_device_id layout — confirm
 * against the struct this macro initializes).  Vendor is always 0x1002
 * (ATI/AMD); driver_data packs the CHIP_* family with RADEON_* feature
 * flags.  The list is terminated by the all-zero entry.  This table is
 * auto-generated (see the comment above) — do not hand-edit entries.
 */
#define radeon_PCI_IDS \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
{0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
{0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
{0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
{0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
{0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
/drivers/include/drm/radeon_drm.h |
---|
0,0 → 1,911 |
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- |
* |
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Fremont, California. |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Kevin E. Martin <martin@valinux.com> |
* Gareth Hughes <gareth@valinux.com> |
* Keith Whitwell <keith@tungstengraphics.com> |
*/ |
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__

#include "drm.h"

/* WARNING: If you change any of these defines, make sure to change the
 * defines in the X server file (radeon_sarea.h)
 */
#ifndef __RADEON_SAREA_DEFINES__
#define __RADEON_SAREA_DEFINES__

/* Old style state flags, required for sarea interface (1.1 and 1.2
 * clears) and 1.2 drm_vertex2 ioctl.
 *
 * Each bit names a group of hardware state that userspace marks dirty;
 * these values are ABI and must never be renumbered.
 */
#define RADEON_UPLOAD_CONTEXT		0x00000001
#define RADEON_UPLOAD_VERTFMT		0x00000002
#define RADEON_UPLOAD_LINE		0x00000004
#define RADEON_UPLOAD_BUMPMAP		0x00000008
#define RADEON_UPLOAD_MASKS		0x00000010
#define RADEON_UPLOAD_VIEWPORT		0x00000020
#define RADEON_UPLOAD_SETUP		0x00000040
#define RADEON_UPLOAD_TCL		0x00000080
#define RADEON_UPLOAD_MISC		0x00000100
#define RADEON_UPLOAD_TEX0		0x00000200
#define RADEON_UPLOAD_TEX1		0x00000400
#define RADEON_UPLOAD_TEX2		0x00000800
#define RADEON_UPLOAD_TEX0IMAGES	0x00001000
#define RADEON_UPLOAD_TEX1IMAGES	0x00002000
#define RADEON_UPLOAD_TEX2IMAGES	0x00004000
#define RADEON_UPLOAD_CLIPRECTS		0x00008000	/* handled client-side */
#define RADEON_REQUIRE_QUIESCENCE	0x00010000
#define RADEON_UPLOAD_ZBIAS		0x00020000	/* version 1.2 and newer */
/* Aggregate masks over the flags above (fixed ABI values). */
#define RADEON_UPLOAD_ALL		0x003effff
#define RADEON_UPLOAD_CONTEXT_ALL       0x003e01ff
/* New style per-packet identifiers for use in cmd_buffer ioctl with
 * the RADEON_EMIT_PACKET command.  Comments relate new packets to old
 * state bits and the packet size:
 *
 * Index into the kernel's per-packet register/size table; values are
 * ABI and are only ever appended to (see RADEON_MAX_STATE_PACKETS).
 */
#define RADEON_EMIT_PP_MISC				0	/* context/7 */
#define RADEON_EMIT_PP_CNTL				1	/* context/3 */
#define RADEON_EMIT_RB3D_COLORPITCH			2	/* context/1 */
#define RADEON_EMIT_RE_LINE_PATTERN			3	/* line/2 */
#define RADEON_EMIT_SE_LINE_WIDTH			4	/* line/1 */
#define RADEON_EMIT_PP_LUM_MATRIX			5	/* bumpmap/1 */
#define RADEON_EMIT_PP_ROT_MATRIX_0			6	/* bumpmap/2 */
#define RADEON_EMIT_RB3D_STENCILREFMASK			7	/* masks/3 */
#define RADEON_EMIT_SE_VPORT_XSCALE			8	/* viewport/6 */
#define RADEON_EMIT_SE_CNTL				9	/* setup/2 */
#define RADEON_EMIT_SE_CNTL_STATUS			10	/* setup/1 */
#define RADEON_EMIT_RE_MISC				11	/* misc/1 */
#define RADEON_EMIT_PP_TXFILTER_0			12	/* tex0/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_0			13	/* tex0/1 */
#define RADEON_EMIT_PP_TXFILTER_1			14	/* tex1/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_1			15	/* tex1/1 */
#define RADEON_EMIT_PP_TXFILTER_2			16	/* tex2/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_2			17	/* tex2/1 */
#define RADEON_EMIT_SE_ZBIAS_FACTOR			18	/* zbias/2 */
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT		19	/* tcl/11 */
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED	20	/* material/17 */
/* R200-only packets start here (interleaved with later RADEON ones). */
#define R200_EMIT_PP_TXCBLEND_0				21	/* tex0/4 */
#define R200_EMIT_PP_TXCBLEND_1				22	/* tex1/4 */
#define R200_EMIT_PP_TXCBLEND_2				23	/* tex2/4 */
#define R200_EMIT_PP_TXCBLEND_3				24	/* tex3/4 */
#define R200_EMIT_PP_TXCBLEND_4				25	/* tex4/4 */
#define R200_EMIT_PP_TXCBLEND_5				26	/* tex5/4 */
#define R200_EMIT_PP_TXCBLEND_6				27	/* /4 */
#define R200_EMIT_PP_TXCBLEND_7				28	/* /4 */
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0			29	/* tcl/7 */
#define R200_EMIT_TFACTOR_0				30	/* tf/7 */
#define R200_EMIT_VTX_FMT_0				31	/* vtx/5 */
#define R200_EMIT_VAP_CTL				32	/* vap/1 */
#define R200_EMIT_MATRIX_SELECT_0			33	/* msl/5 */
#define R200_EMIT_TEX_PROC_CTL_2			34	/* tcg/5 */
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL		35	/* tcl/1 */
#define R200_EMIT_PP_TXFILTER_0				36	/* tex0/6 */
#define R200_EMIT_PP_TXFILTER_1				37	/* tex1/6 */
#define R200_EMIT_PP_TXFILTER_2				38	/* tex2/6 */
#define R200_EMIT_PP_TXFILTER_3				39	/* tex3/6 */
#define R200_EMIT_PP_TXFILTER_4				40	/* tex4/6 */
#define R200_EMIT_PP_TXFILTER_5				41	/* tex5/6 */
#define R200_EMIT_PP_TXOFFSET_0				42	/* tex0/1 */
#define R200_EMIT_PP_TXOFFSET_1				43	/* tex1/1 */
#define R200_EMIT_PP_TXOFFSET_2				44	/* tex2/1 */
#define R200_EMIT_PP_TXOFFSET_3				45	/* tex3/1 */
#define R200_EMIT_PP_TXOFFSET_4				46	/* tex4/1 */
#define R200_EMIT_PP_TXOFFSET_5				47	/* tex5/1 */
#define R200_EMIT_VTE_CNTL				48	/* vte/1 */
#define R200_EMIT_OUTPUT_VTX_COMP_SEL			49	/* vtx/1 */
#define R200_EMIT_PP_TAM_DEBUG3				50	/* tam/1 */
#define R200_EMIT_PP_CNTL_X				51	/* cst/1 */
#define R200_EMIT_RB3D_DEPTHXY_OFFSET			52	/* cst/1 */
#define R200_EMIT_RE_AUX_SCISSOR_CNTL			53	/* cst/1 */
#define R200_EMIT_RE_SCISSOR_TL_0			54	/* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_1			55	/* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_2			56	/* cst/2 */
#define R200_EMIT_SE_VAP_CNTL_STATUS			57	/* cst/1 */
#define R200_EMIT_SE_VTX_STATE_CNTL			58	/* cst/1 */
#define R200_EMIT_RE_POINTSIZE				59	/* cst/1 */
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0		60	/* cst/4 */
#define R200_EMIT_PP_CUBIC_FACES_0			61
#define R200_EMIT_PP_CUBIC_OFFSETS_0			62
#define R200_EMIT_PP_CUBIC_FACES_1			63
#define R200_EMIT_PP_CUBIC_OFFSETS_1			64
#define R200_EMIT_PP_CUBIC_FACES_2			65
#define R200_EMIT_PP_CUBIC_OFFSETS_2			66
#define R200_EMIT_PP_CUBIC_FACES_3			67
#define R200_EMIT_PP_CUBIC_OFFSETS_3			68
#define R200_EMIT_PP_CUBIC_FACES_4			69
#define R200_EMIT_PP_CUBIC_OFFSETS_4			70
#define R200_EMIT_PP_CUBIC_FACES_5			71
#define R200_EMIT_PP_CUBIC_OFFSETS_5			72
#define RADEON_EMIT_PP_TEX_SIZE_0			73
#define RADEON_EMIT_PP_TEX_SIZE_1			74
#define RADEON_EMIT_PP_TEX_SIZE_2			75
#define R200_EMIT_RB3D_BLENDCOLOR			76
#define R200_EMIT_TCL_POINT_SPRITE_CNTL			77
#define RADEON_EMIT_PP_CUBIC_FACES_0			78
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0			79
#define RADEON_EMIT_PP_CUBIC_FACES_1			80
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1			81
#define RADEON_EMIT_PP_CUBIC_FACES_2			82
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2			83
#define R200_EMIT_PP_TRI_PERF_CNTL			84
#define R200_EMIT_PP_AFS_0				85
#define R200_EMIT_PP_AFS_1				86
#define R200_EMIT_ATF_TFACTOR				87
#define R200_EMIT_PP_TXCTLALL_0				88
#define R200_EMIT_PP_TXCTLALL_1				89
#define R200_EMIT_PP_TXCTLALL_2				90
#define R200_EMIT_PP_TXCTLALL_3				91
#define R200_EMIT_PP_TXCTLALL_4				92
#define R200_EMIT_PP_TXCTLALL_5				93
#define R200_EMIT_VAP_PVS_CNTL				94
/* One past the last valid packet id above. */
#define RADEON_MAX_STATE_PACKETS			95
/* Commands understood by cmd_buffer ioctl.  More can be added but
 * obviously these can't be removed or changed:
 *
 * Each value is the cmd_type byte at the start of a
 * drm_radeon_cmd_header_t entry in the command stream.
 */
#define RADEON_CMD_PACKET	1	/* emit one of the register packets above */
#define RADEON_CMD_SCALARS	2	/* emit scalar data */
#define RADEON_CMD_VECTORS	3	/* emit vector data */
#define RADEON_CMD_DMA_DISCARD	4	/* discard current dma buf */
#define RADEON_CMD_PACKET3	5	/* emit hw packet */
#define RADEON_CMD_PACKET3_CLIP	6	/* emit hw packet wrapped in cliprects */
#define RADEON_CMD_SCALARS2	7	/* r200 stopgap */
#define RADEON_CMD_WAIT		8	/* emit hw wait commands -- note:
					 * doesn't make the cpu wait, just
					 * the graphics hardware */
#define RADEON_CMD_VECLINEAR	9	/* another r200 stopgap */
/* Header word of one command in the RADEON_CMDBUF stream.  The union
 * overlays the same 32 bits; which view applies is selected by the
 * common first byte, cmd_type (a RADEON_CMD_* value above).  Layout is
 * ABI: four bytes packed into one int, low byte first on little-endian.
 */
typedef union {
	int i;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;			/* generic view: cmd_type only */
	struct {
		unsigned char cmd_type, packet_id, pad0, pad1;
	} packet;			/* RADEON_CMD_PACKET: packet_id is a RADEON_EMIT_* index */
	struct {
		unsigned char cmd_type, offset, stride, count;
	} scalars;			/* RADEON_CMD_SCALARS / _SCALARS2 */
	struct {
		unsigned char cmd_type, offset, stride, count;
	} vectors;			/* RADEON_CMD_VECTORS */
	struct {
		unsigned char cmd_type, addr_lo, addr_hi, count;
	} veclinear;			/* RADEON_CMD_VECLINEAR: split 16-bit address */
	struct {
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;				/* RADEON_CMD_DMA_DISCARD: buf_idx names the dma buffer */
	struct {
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;				/* RADEON_CMD_WAIT: flags = RADEON_WAIT_* bits */
} drm_radeon_cmd_header_t;
/* Flag bits for the wait.flags byte above. */
#define RADEON_WAIT_2D	0x1
#define RADEON_WAIT_3D	0x2

/* Allowed parameters for R300_CMD_PACKET3
 */
#define R300_CMD_PACKET3_CLEAR		0
#define R300_CMD_PACKET3_RAW		1

/* Commands understood by cmd_buffer ioctl for R300.
 * The interface has not been stabilized, so some of these may be removed
 * and eventually reordered before stabilization.
 */
#define R300_CMD_PACKET0		1
#define R300_CMD_VPU			2	/* emit vertex program upload */
#define R300_CMD_PACKET3		3	/* emit a packet3 */
#define R300_CMD_END3D			4	/* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY		5
#define R300_CMD_DMA_DISCARD		6
#define R300_CMD_WAIT			7
#	define R300_WAIT_2D		0x1
#	define R300_WAIT_3D		0x2
/* these two defines are DOING IT WRONG - however
 * we have userspace which relies on using these.
 * The wait interface is backwards compat new
 * code should use the NEW_WAIT defines below
 * THESE ARE NOT BIT FIELDS
 */
#	define R300_WAIT_2D_CLEAN	0x3
#	define R300_WAIT_3D_CLEAN	0x4
/* NEW_WAIT values: discrete codes (again, not bit fields). */
#	define R300_NEW_WAIT_2D_3D	0x3
#	define R300_NEW_WAIT_2D_2D_CLEAN	0x4
#	define R300_NEW_WAIT_3D_3D_CLEAN	0x6
#	define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8
#define R300_CMD_SCRATCH		8
#define R300_CMD_R500FP                 9
/* R300 analogue of drm_radeon_cmd_header_t: the first 32 bits of each
 * command in an R300 cmd_buffer stream, discriminated by cmd_type
 * (an R300_CMD_* value).  Layout is ABI.
 */
typedef union {
	unsigned int u;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;			/* generic view */
	struct {
		unsigned char cmd_type, count, reglo, reghi;
	} packet0;			/* R300_CMD_PACKET0: register range write */
	struct {
		unsigned char cmd_type, count, adrlo, adrhi;
	} vpu;				/* R300_CMD_VPU: vertex program upload */
	struct {
		unsigned char cmd_type, packet, pad0, pad1;
	} packet3;			/* R300_CMD_PACKET3: packet = R300_CMD_PACKET3_* */
	struct {
		unsigned char cmd_type, packet;
		unsigned short count;	/* amount of packet2 to emit */
	} delay;			/* R300_CMD_CP_DELAY */
	struct {
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;				/* R300_CMD_DMA_DISCARD */
	struct {
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;				/* R300_CMD_WAIT: flags = R300_*WAIT_* codes */
	struct {
		unsigned char cmd_type, reg, n_bufs, flags;
	} scratch;			/* R300_CMD_SCRATCH */
	struct {
		unsigned char cmd_type, count, adrlo, adrhi_flags;
	} r500fp;			/* R300_CMD_R500FP: adrhi_flags also carries R500FP_* bits */
} drm_r300_cmd_header_t;
/* Buffer-selection bits for the clear ioctl (drm_radeon_clear_t.flags). */
#define RADEON_FRONT			0x1
#define RADEON_BACK			0x2
#define RADEON_DEPTH			0x4
#define RADEON_STENCIL			0x8
/* High clear-behaviour flag bits, OR'd into the same flags word. */
#define RADEON_CLEAR_FASTZ		0x80000000
#define RADEON_USE_HIERZ		0x40000000
#define RADEON_USE_COMP_ZBUF		0x20000000

/* Flag bits carried in r500fp.adrhi_flags above. */
#define R500FP_CONSTANT_TYPE  (1 << 1)
#define R500FP_CONSTANT_CLAMP (1 << 2)

/* Primitive types
 */
#define RADEON_POINTS			0x1
#define RADEON_LINES			0x2
#define RADEON_LINE_STRIP		0x3
#define RADEON_TRIANGLES		0x4
#define RADEON_TRIANGLE_FAN		0x5
#define RADEON_TRIANGLE_STRIP		0x6

/* Vertex/indirect buffer size
 */
#define RADEON_BUFFER_SIZE		65536

/* Byte offsets for indirect buffer data
 */
#define RADEON_INDEX_PRIM_OFFSET	20

#define RADEON_SCRATCH_REG_OFFSET	32
#define R600_SCRATCH_REG_OFFSET         256

#define RADEON_NR_SAREA_CLIPRECTS	12

/* There are 2 heaps (local/GART).  Each region within a heap is a
 * minimum of 64k, and there are at most 64 of them per heap.
 */
#define RADEON_LOCAL_TEX_HEAP		0
#define RADEON_GART_TEX_HEAP		1
#define RADEON_NR_TEX_HEAPS		2
#define RADEON_NR_TEX_REGIONS		64
#define RADEON_LOG_TEX_GRANULARITY	16

#define RADEON_MAX_TEXTURE_LEVELS	12
#define RADEON_MAX_TEXTURE_UNITS	3

#define RADEON_MAX_SURFACES		8

/* Blits have strict offset rules.  All blit offset must be aligned on
 * a 1K-byte boundary.
 */
#define RADEON_OFFSET_SHIFT             10
#define RADEON_OFFSET_ALIGN             (1 << RADEON_OFFSET_SHIFT)
#define RADEON_OFFSET_MASK              (RADEON_OFFSET_ALIGN - 1)

#endif				/* __RADEON_SAREA_DEFINES__ */
/* One RGBA color value, one 32-bit word per channel. */
typedef struct {
	unsigned int red;
	unsigned int green;
	unsigned int blue;
	unsigned int alpha;
} radeon_color_regs_t;
/* Shadow copy of the context-state register block that userspace hands
 * to the kernel through the sarea / legacy vertex ioctls.  The hex
 * comments give the hardware register offset each group starts at.
 * Field order is ABI — do not reorder.
 */
typedef struct {
	/* Context state */
	unsigned int pp_misc;		/* 0x1c14 */
	unsigned int pp_fog_color;
	unsigned int re_solid_color;
	unsigned int rb3d_blendcntl;
	unsigned int rb3d_depthoffset;
	unsigned int rb3d_depthpitch;
	unsigned int rb3d_zstencilcntl;

	unsigned int pp_cntl;		/* 0x1c38 */
	unsigned int rb3d_cntl;
	unsigned int rb3d_coloroffset;
	unsigned int re_width_height;
	unsigned int rb3d_colorpitch;
	unsigned int se_cntl;

	/* Vertex format state */
	unsigned int se_coord_fmt;	/* 0x1c50 */

	/* Line state */
	unsigned int re_line_pattern;	/* 0x1cd0 */
	unsigned int re_line_state;

	unsigned int se_line_width;	/* 0x1db8 */

	/* Bumpmap state */
	unsigned int pp_lum_matrix;	/* 0x1d00 */

	unsigned int pp_rot_matrix_0;	/* 0x1d58 */
	unsigned int pp_rot_matrix_1;

	/* Mask state */
	unsigned int rb3d_stencilrefmask;	/* 0x1d7c */
	unsigned int rb3d_ropcntl;
	unsigned int rb3d_planemask;

	/* Viewport state */
	unsigned int se_vport_xscale;	/* 0x1d98 */
	unsigned int se_vport_xoffset;
	unsigned int se_vport_yscale;
	unsigned int se_vport_yoffset;
	unsigned int se_vport_zscale;
	unsigned int se_vport_zoffset;

	/* Setup state */
	unsigned int se_cntl_status;	/* 0x2140 */

	/* Misc state */
	unsigned int re_top_left;	/* 0x26c0 */
	unsigned int re_misc;
} drm_radeon_context_regs_t;
/* Second context-register group, added with interface v1.2 (zbias). */
typedef struct {
	/* Zbias state */
	unsigned int se_zbias_factor;	/* 0x1dac */
	unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;

/* Setup registers for each texture unit
 */
typedef struct {
	unsigned int pp_txfilter;
	unsigned int pp_txformat;
	unsigned int pp_txoffset;
	unsigned int pp_txcblend;
	unsigned int pp_txablend;
	unsigned int pp_tfactor;
	unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
/* One primitive run inside a vertex buffer, as passed to the v1.2
 * vertex2 ioctl.  start/finish delimit the vertex range; the bitfields
 * pack primitive type, state index and vertex count into one word.
 */
typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int prim:8;
	unsigned int stateidx:8;
	unsigned int numverts:16;	/* overloaded as offset/64 for elt prims */
	unsigned int vc_format;		/* vertex format */
} drm_radeon_prim_t;

/* Full snapshot of emittable state referenced by drm_radeon_prim_t.stateidx. */
typedef struct {
	drm_radeon_context_regs_t context;
	drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
	drm_radeon_context2_regs_t context2;
	unsigned int dirty;		/* RADEON_UPLOAD_* bits marking valid groups */
} drm_radeon_state_t;
/* Shared-memory area (sarea) layout used by old DRI userspace.  Note
 * this port has commented out the clip-rect and texture-region arrays
 * (struct drm_clip_rect / struct drm_tex_region not available here);
 * that changes the struct size vs. the upstream header — offsets of
 * later members differ from Linux.  TODO confirm intentional.
 */
typedef struct {
	/* The channel for communication of state information to the
	 * kernel on firing a vertex buffer with either of the
	 * obsoleted vertex/index ioctls.
	 */
	drm_radeon_context_regs_t context_state;
	drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
	unsigned int dirty;
	unsigned int vertsize;
	unsigned int vc_format;

	/* The current cliprects, or a subset thereof.
	 */
//	struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
	unsigned int nbox;

	/* Counters for client-side throttling of rendering clients.
	 */
	unsigned int last_frame;
	unsigned int last_dispatch;
	unsigned int last_clear;

//	struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
//					       1];
	unsigned int tex_age[RADEON_NR_TEX_HEAPS];
	int ctx_owner;
	int pfState;		/* number of 3d windows (0,1,2ormore) */
	int pfCurrentPage;	/* which buffer is being displayed? */
	int crtc2_base;		/* CRTC2 frame offset */
	int tiling_enabled;	/* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
 * defines in the Xserver file (xf86drmRadeon.h)
 *
 * KW: actually it's illegal to change any of this (backwards compatibility).
 */

/* Radeon specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
#define DRM_RADEON_CP_INIT	0x00
#define DRM_RADEON_CP_START	0x01
#define DRM_RADEON_CP_STOP	0x02
#define DRM_RADEON_CP_RESET	0x03
#define DRM_RADEON_CP_IDLE	0x04
#define DRM_RADEON_RESET	0x05
#define DRM_RADEON_FULLSCREEN	0x06
#define DRM_RADEON_SWAP		0x07
#define DRM_RADEON_CLEAR	0x08
#define DRM_RADEON_VERTEX	0x09
#define DRM_RADEON_INDICES	0x0A
/* Deliberately defined without a value: slot 0x0B is retired and must
 * not be reused (matches the upstream radeon_drm.h — TODO confirm). */
#define DRM_RADEON_NOT_USED
#define DRM_RADEON_STIPPLE	0x0C
#define DRM_RADEON_INDIRECT	0x0D
#define DRM_RADEON_TEXTURE	0x0E
#define DRM_RADEON_VERTEX2	0x0F
#define DRM_RADEON_CMDBUF	0x10
#define DRM_RADEON_GETPARAM	0x11
#define DRM_RADEON_FLIP		0x12
#define DRM_RADEON_ALLOC	0x13
#define DRM_RADEON_FREE		0x14
#define DRM_RADEON_INIT_HEAP	0x15
#define DRM_RADEON_IRQ_EMIT	0x16
#define DRM_RADEON_IRQ_WAIT	0x17
#define DRM_RADEON_CP_RESUME	0x18
#define DRM_RADEON_SETPARAM	0x19
#define DRM_RADEON_SURF_ALLOC	0x1a
#define DRM_RADEON_SURF_FREE	0x1b
/* KMS ioctl */
#define DRM_RADEON_GEM_INFO		0x1c
#define DRM_RADEON_GEM_CREATE		0x1d
#define DRM_RADEON_GEM_MMAP		0x1e
/* 0x1f / 0x20 and 0x25 are gaps in the numbering (reserved upstream). */
#define DRM_RADEON_GEM_PREAD		0x21
#define DRM_RADEON_GEM_PWRITE		0x22
#define DRM_RADEON_GEM_SET_DOMAIN	0x23
#define DRM_RADEON_GEM_WAIT_IDLE	0x24
#define DRM_RADEON_CS			0x26
#define DRM_RADEON_INFO			0x27
#define DRM_RADEON_GEM_SET_TILING	0x28
#define DRM_RADEON_GEM_GET_TILING	0x29
#define DRM_RADEON_GEM_BUSY		0x2a
/* Full ioctl request codes: DRM_IO/IOW/IOWR combine the direction, the
 * command number above offset by DRM_COMMAND_BASE, and the argument
 * struct.  One line per ioctl; values are ABI.
 */
#define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE    DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE       DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME  DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD  DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN  DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS         DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO       DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
/* Note: these two lack the GEM_ infix in the ioctl name (historical). */
#define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
/* Argument for DRM_RADEON_CP_INIT: initializes or tears down the CP
 * (command processor) for the legacy (non-KMS) path.  func selects the
 * operation and chip family; the remaining members describe the
 * framebuffer layout and the memory-map offsets of the ring and buffers.
 */
typedef struct drm_radeon_init {
	enum {
		RADEON_INIT_CP = 0x01,
		RADEON_CLEANUP_CP = 0x02,
		RADEON_INIT_R200_CP = 0x03,
		RADEON_INIT_R300_CP = 0x04,
		RADEON_INIT_R600_CP = 0x05
	} func;
	unsigned long sarea_priv_offset;	/* offset of driver-private sarea data */
	int is_pci;
	int cp_mode;
	int gart_size;
	int ring_size;
	int usec_timeout;

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;

	unsigned long fb_offset;
	unsigned long mmio_offset;
	unsigned long ring_offset;
	unsigned long ring_rptr_offset;
	unsigned long buffers_offset;
	unsigned long gart_textures_offset;
} drm_radeon_init_t;
/* Argument for DRM_RADEON_CP_STOP: whether to flush pending commands
 * and/or wait for the engine to idle before stopping the CP. */
typedef struct drm_radeon_cp_stop {
	int flush;
	int idle;
} drm_radeon_cp_stop_t;

/* Argument for DRM_RADEON_FULLSCREEN: enter/leave fullscreen mode. */
typedef struct drm_radeon_fullscreen {
	enum {
		RADEON_INIT_FULLSCREEN = 0x01,
		RADEON_CLEANUP_FULLSCREEN = 0x02
	} func;
} drm_radeon_fullscreen_t;

/* Indices into drm_radeon_clear_rect_t.f/ui below. */
#define CLEAR_X1	0
#define CLEAR_Y1	1
#define CLEAR_X2	2
#define CLEAR_Y2	3
#define CLEAR_DEPTH	4
/* One clear rectangle: five values addressable either as floats or as
 * raw 32-bit words, indexed by the CLEAR_* constants above. */
typedef union drm_radeon_clear_rect {
	float f[5];
	unsigned int ui[5];
} drm_radeon_clear_rect_t;

/* Argument for DRM_RADEON_CLEAR.  flags selects buffers (RADEON_FRONT/
 * BACK/DEPTH/STENCIL plus the high behaviour bits).  Note this port has
 * commented out the depth_boxes user pointer present upstream — the
 * struct size differs from Linux.  TODO confirm intentional.
 */
typedef struct drm_radeon_clear {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask;	/* misnamed field:  should be stencil */
//	drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
/* Argument for the obsolete DRM_RADEON_VERTEX ioctl (v1.1). */
typedef struct drm_radeon_vertex {
	int prim;			/* RADEON_POINTS .. RADEON_TRIANGLE_STRIP */
	int idx;			/* Index of vertex buffer */
	int count;			/* Number of vertices in buffer */
	int discard;			/* Client finished with buffer? */
} drm_radeon_vertex_t;

/* Argument for the obsolete DRM_RADEON_INDICES ioctl (v1.1). */
typedef struct drm_radeon_indices {
	int prim;
	int idx;
	int start;			/* index range within the buffer */
	int end;
	int discard;			/* Client finished with buffer? */
} drm_radeon_indices_t;

/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
 *      - allows multiple primitives and state changes in a single ioctl
 *      - supports driver change to emit native primitives
 */
/* Argument for DRM_RADEON_VERTEX2 (v1.2).  The state/prim user pointers
 * are commented out in this port (struct size differs from upstream —
 * TODO confirm intentional); counts remain. */
typedef struct drm_radeon_vertex2 {
	int idx;			/* Index of vertex buffer */
	int discard;			/* Client finished with buffer? */
	int nr_states;
//	drm_radeon_state_t __user *state;
	int nr_prims;
//	drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;

/* v1.3 - obsoletes drm_radeon_vertex2
 *      - allows arbitarily large cliprect list
 *      - allows updating of tcl packet, vector and scalar state
 *      - allows memory-efficient description of state updates
 *      - allows state to be emitted without a primitive
 *           (for clears, ctx switches)
 *      - allows more than one dma buffer to be referenced per ioctl
 *      - supports tcl driver
 *      - may be extended in future versions with new cmd types, packets
 */
typedef struct drm_radeon_cmd_buffer {
	int bufsz;			/* byte size of buf */
	char __user *buf;		/* stream of drm_radeon_cmd_header_t-led commands */
	int nbox;
	struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
/* Sub-image descriptor referenced by drm_radeon_texture_t.image. */
typedef struct drm_radeon_tex_image {
	unsigned int x, y;		/* Blit coordinates */
	unsigned int width, height;
	const void __user *data;	/* source texel data */
} drm_radeon_tex_image_t;

/* Argument for DRM_RADEON_TEXTURE: upload a texture image by blit. */
typedef struct drm_radeon_texture {
	unsigned int offset;
	int pitch;
	int format;
	int width;			/* Texture image coordinates */
	int height;
	drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;

/* Argument for DRM_RADEON_STIPPLE: 32x32 polygon stipple pattern. */
typedef struct drm_radeon_stipple {
	unsigned int __user *mask;
} drm_radeon_stipple_t;

/* Argument for DRM_RADEON_INDIRECT: fire a byte range of a dma buffer. */
typedef struct drm_radeon_indirect {
	int idx;
	int start;
	int end;
	int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2

/* 1.3: An ioctl to get parameters that aren't available to the 3d
 * client any other way.
 */
#define RADEON_PARAM_GART_BUFFER_OFFSET    1	/* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME            2
#define RADEON_PARAM_LAST_DISPATCH         3
#define RADEON_PARAM_LAST_CLEAR            4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR                5
#define RADEON_PARAM_GART_BASE             6	/* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE       7	/* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE         8
#define RADEON_PARAM_SAREA_HANDLE          9
#define RADEON_PARAM_GART_TEX_HANDLE       10
#define RADEON_PARAM_SCRATCH_OFFSET        11
#define RADEON_PARAM_CARD_TYPE             12	/* returns RADEON_CARD_* */
#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION           14   /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID             16
#define RADEON_PARAM_NUM_Z_PIPES           17   /* num Z pipes */

/* Argument for DRM_RADEON_GETPARAM: param is a RADEON_PARAM_* above,
 * value points at the caller's result variable. */
typedef struct drm_radeon_getparam {
	int param;
	void __user *value;
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
 */
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB   2

/* Argument for DRM_RADEON_ALLOC: allocate from a managed heap. */
typedef struct drm_radeon_mem_alloc {
	int region;			/* RADEON_MEM_REGION_* */
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;

/* Argument for DRM_RADEON_FREE: return a region to its heap. */
typedef struct drm_radeon_mem_free {
	int region;
	int region_offset;
} drm_radeon_mem_free_t;

/* Argument for DRM_RADEON_INIT_HEAP: define a heap's range. */
typedef struct drm_radeon_mem_init_heap {
	int region;
	int size;
	int start;
} drm_radeon_mem_init_heap_t;
/* 1.6: Userspace can request & wait on irq's:
 */
/* Argument for DRM_RADEON_IRQ_EMIT: kernel writes the new sequence
 * number through irq_seq. */
typedef struct drm_radeon_irq_emit {
	int __user *irq_seq;
} drm_radeon_irq_emit_t;

/* Argument for DRM_RADEON_IRQ_WAIT: block until irq_seq is reached. */
typedef struct drm_radeon_irq_wait {
	int irq_seq;
} drm_radeon_irq_wait_t;

/* 1.10: Clients tell the DRM where they think the framebuffer is located in
 * the card's address space, via a new generic ioctl to set parameters
 */
typedef struct drm_radeon_setparam {
	unsigned int param;		/* RADEON_SETPARAM_* below */
	__s64 value;
} drm_radeon_setparam_t;
/* Parameter ids for drm_radeon_setparam_t.param. */
#define RADEON_SETPARAM_FB_LOCATION    1	/* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING  2	/* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3	/* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4		/* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5    /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6           /* VBLANK CRTC */

/* 1.14: Clients can allocate/free a surface
 */
/* Argument for DRM_RADEON_SURF_ALLOC: reserve a surface register. */
typedef struct drm_radeon_surface_alloc {
	unsigned int address;
	unsigned int size;
	unsigned int flags;
} drm_radeon_surface_alloc_t;

/* Argument for DRM_RADEON_SURF_FREE: release by starting address. */
typedef struct drm_radeon_surface_free {
	unsigned int address;
} drm_radeon_surface_free_t;

/* Bits for the VBLANK_CRTC get/set parameters above. */
#define	DRM_RADEON_VBLANK_CRTC1		1
#define	DRM_RADEON_VBLANK_CRTC2		2
/*
 * Kernel modesetting world below.
 */
/* GEM memory domains a buffer object may live in (bitmask). */
#define RADEON_GEM_DOMAIN_CPU		0x1
#define RADEON_GEM_DOMAIN_GTT		0x2
#define RADEON_GEM_DOMAIN_VRAM		0x4

/* Out-args of DRM_RADEON_GEM_INFO: memory pool sizes in bytes. */
struct drm_radeon_gem_info {
	uint64_t	gart_size;
	uint64_t	vram_size;
	uint64_t	vram_visible;	/* VRAM reachable by the CPU */
};

/* Flag for drm_radeon_gem_create.flags. */
#define RADEON_GEM_NO_BACKING_STORE 1

/* Args of DRM_RADEON_GEM_CREATE: in size/alignment/domain/flags,
 * out handle. */
struct drm_radeon_gem_create {
	uint64_t	size;
	uint64_t	alignment;
	uint32_t	handle;
	uint32_t	initial_domain;	/* RADEON_GEM_DOMAIN_* bits */
	uint32_t	flags;
};

/* Tiling flag bits for the set/get_tiling ioctls below. */
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT  0x4
#define RADEON_TILING_SWAP_32BIT  0x8
#define RADEON_TILING_SURFACE     0x10 /* this object requires a surface
					* when mapped - i.e. front buffer */

/* Args of DRM_RADEON_GEM_SET_TILING. */
struct drm_radeon_gem_set_tiling {
	uint32_t	handle;
	uint32_t	tiling_flags;	/* RADEON_TILING_* bits */
	uint32_t	pitch;
};

/* Args of DRM_RADEON_GEM_GET_TILING: in handle, out flags/pitch. */
struct drm_radeon_gem_get_tiling {
	uint32_t	handle;
	uint32_t	tiling_flags;
	uint32_t	pitch;
};

/* Args of DRM_RADEON_GEM_MMAP: in handle/offset/size, out addr_ptr
 * (uint64_t so the layout is identical for 32- and 64-bit userspace). */
struct drm_radeon_gem_mmap {
	uint32_t	handle;
	uint32_t	pad;
	uint64_t	offset;
	uint64_t	size;
	uint64_t	addr_ptr;
};

/* Args of DRM_RADEON_GEM_SET_DOMAIN: move/validate a bo into domains. */
struct drm_radeon_gem_set_domain {
	uint32_t	handle;
	uint32_t	read_domains;
	uint32_t	write_domain;
};

/* Args of DRM_RADEON_GEM_WAIT_IDLE: block until the bo is idle. */
struct drm_radeon_gem_wait_idle {
	uint32_t	handle;
	uint32_t	pad;
};

/* Args of DRM_RADEON_GEM_BUSY: in handle, out current domain. */
struct drm_radeon_gem_busy {
	uint32_t	handle;
	uint32_t        domain;
};
/* Args of DRM_RADEON_GEM_PREAD: copy a byte range out of a bo. */
struct drm_radeon_gem_pread {
	/** Handle for the object being read. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to read from */
	uint64_t offset;
	/** Length of data to read */
	uint64_t size;
	/** Pointer to write the data into. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};

/* Args of DRM_RADEON_GEM_PWRITE: copy a byte range into a bo. */
struct drm_radeon_gem_pwrite {
	/** Handle for the object being written to. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to write to */
	uint64_t offset;
	/** Length of data to write */
	uint64_t size;
	/** Pointer to read the data from. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};
/* Chunk ids for drm_radeon_cs_chunk.chunk_id. */
#define RADEON_CHUNK_ID_RELOCS	0x01
#define RADEON_CHUNK_ID_IB	0x02

/* One chunk of a command submission: id, dword length, and a user
 * pointer (as uint64_t) to the chunk payload. */
struct drm_radeon_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	uint64_t		chunk_data;
};

/* One relocation entry inside a RELOCS chunk. */
struct drm_radeon_cs_reloc {
	uint32_t		handle;
	uint32_t		read_domains;
	uint32_t		write_domain;
	uint32_t		flags;
};

/* Args of DRM_RADEON_CS: submit num_chunks chunks for execution. */
struct drm_radeon_cs {
	uint32_t		num_chunks;
	uint32_t		cs_id;
	/* this points to uint64_t * which point to cs chunks */
	uint64_t		chunks;
	/* updates to the limits after this CS ioctl */
	uint64_t		gart_limit;
	uint64_t		vram_limit;
};

/* Request ids for drm_radeon_info.request. */
#define RADEON_INFO_DEVICE_ID		0x00
#define RADEON_INFO_NUM_GB_PIPES	0x01
#define RADEON_INFO_NUM_Z_PIPES		0x02
#define RADEON_INFO_ACCEL_WORKING	0x03

/* Args of DRM_RADEON_INFO: in request, out value. */
struct drm_radeon_info {
	uint32_t		request;
	uint32_t		pad;
	uint64_t		value;
};

#endif
/drivers/include/drm/ttm/ttm_bo_api.h |
---|
0,0 → 1,637 |
/************************************************************************** |
* |
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
/* |
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
*/ |
#ifndef _TTM_BO_API_H_ |
#define _TTM_BO_API_H_ |
#include "drm_hashtab.h" |
#include <linux/kref.h> |
#include <linux/list.h> |
#include <linux/spinlock.h> |
//#include <linux/wait.h> |
//#include <linux/mutex.h> |
//#include <linux/mm.h> |
//#include <linux/rbtree.h> |
#include <linux/bitmap.h> |
/* Forward declarations: definitions live in ttm_bo_driver.h / drm_mm.h. */
struct ttm_bo_device;

struct drm_mm_node;

/**
 * struct ttm_placement
 *
 * @fpfn:		first valid page frame number to put the object
 * @lpfn:		last valid page frame number to put the object
 * @num_placement:	number of prefered placements
 * @placement:		prefered placements
 * @num_busy_placement:	number of prefered placements when need to evict buffer
 * @busy_placement:	prefered placements when need to evict buffer
 *
 * Structure indicating the placement you request for an object.
 */
struct ttm_placement {
	unsigned	fpfn;
	unsigned	lpfn;
	unsigned	num_placement;
	const uint32_t	*placement;	/* array of num_placement flag words */
	unsigned	num_busy_placement;
	const uint32_t	*busy_placement;	/* array of num_busy_placement flag words */
};
/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @mem_type: Memory type index (identifies the manager's memory pool).
 * @placement: Placement flags.
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */
struct ttm_mem_reg {
	struct drm_mm_node *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
};
/** |
* enum ttm_bo_type |
* |
* @ttm_bo_type_device: These are 'normal' buffers that can |
* be mmapped by user space. Each of these bos occupy a slot in the |
* device address space, that can be used for normal vm operations. |
* |
* @ttm_bo_type_user: These are user-space memory areas that are made |
* available to the GPU by mapping the buffer pages into the GPU aperture |
* space. These buffers cannot be mmaped from the device address space. |
* |
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, |
* but they cannot be accessed from user-space. For kernel-only use. |
*/ |
enum ttm_bo_type {
	ttm_bo_type_device,	/* mmappable by user space; occupies a device address space slot */
	ttm_bo_type_user,	/* backed by user-space memory mapped into the GPU aperture */
	ttm_bo_type_kernel	/* kernel-only; never accessible from user space */
};
struct ttm_tt; |
/**
 * struct ttm_buffer_object
 *
 * @glob: Pointer to the struct ttm_bo_global this bo belongs to.
 * @bdev: Pointer to the buffer object device structure.
 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
 * buffers.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
 * keeps one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @event_queue: Queue for processes waiting on buffer object status change.
 * @lock: spinlock protecting mostly synchronization members.
 * @mem: structure describing current placement.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writes: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for swap LRU list.
 * @val_seq: Sequence of the validation holding the @reserved lock.
 * Used to avoid starvation when many processes compete to validate the
 * buffer. This member is protected by the bo_device::lru_lock.
 * @seq_valid: The value of @val_seq is valid. This value is protected by
 * the bo_device::lru_lock.
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj_arg: Opaque argument to synchronization object function.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
 * @vm_rb: Rb node for the vm rb tree.
 * @vm_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member, the API visibility of this object makes it possible
 * to derive driver specific types.
 */
struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	unsigned long buffer_start;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	uint64_t addr_space_offset;
	size_t acc_size;

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;
//	wait_queue_head_t event_queue;
	spinlock_t lock;

	/**
	 * Members protected by the bo::reserved lock.
	 */

	struct ttm_mem_reg mem;
//	struct file *persistant_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	uint32_t val_seq;
	bool seq_valid;

	/**
	 * Members protected by the bdev::lru_lock
	 * only when written to.
	 */

	atomic_t reserved;

	/**
	 * Members protected by the bo::lock
	 */

	void *sync_obj_arg;
	void *sync_obj;
	unsigned long priv_flags;

	/**
	 * Members protected by the bdev::vm_lock
	 */

//	struct rb_node vm_rb;
	struct drm_mm_node *vm_node;

	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	unsigned long offset;
	uint32_t cur_placement;
};
/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

/* Set in bo_kmap_type values whose mapping references io memory. */
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,	/* ioremap of io memory */
		ttm_bo_map_vmap = 2,				/* vmap of system pages */
		ttm_bo_map_kmap = 3,				/* kmap of the single @page */
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, /* part of a premapped io region */
	} bo_kmap_type;
};
/** |
* ttm_bo_reference - reference a struct ttm_buffer_object |
* |
* @bo: The buffer object. |
* |
* Returns a refcounted pointer to a buffer object. |
*/ |
static inline struct ttm_buffer_object * |
ttm_bo_reference(struct ttm_buffer_object *bo) |
{ |
kref_get(&bo->kref); |
return bo; |
} |
/** |
* ttm_bo_wait - wait for buffer idle. |
* |
* @bo: The buffer object. |
* @interruptible: Use interruptible wait. |
* @no_wait: Return immediately if buffer is busy. |
* |
* This function must be called with the bo::mutex held, and makes |
* sure any previous rendering to the buffer is completed. |
* Note: It might be necessary to block validations before the |
* wait by reserving the buffer. |
* Returns -EBUSY if no_wait is true and the buffer is busy. |
* Returns -ERESTARTSYS if interrupted by a signal. |
*/ |
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, |
bool interruptible, bool no_wait); |
/** |
* ttm_bo_validate |
* |
* @bo: The buffer object. |
* @placement: Proposed placement for the buffer object. |
* @interruptible: Sleep interruptible if sleeping. |
* @no_wait: Return immediately if the buffer is busy. |
* |
* Changes placement and caching policy of the buffer object |
* according proposed placement. |
* Returns |
* -EINVAL on invalid proposed placement. |
* -ENOMEM on out-of-memory condition. |
* -EBUSY if no_wait is true and buffer busy. |
* -ERESTARTSYS if interrupted by a signal. |
*/ |
extern int ttm_bo_validate(struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
bool interruptible, bool no_wait); |
/** |
* ttm_bo_unref |
* |
* @bo: The buffer object. |
* |
* Unreference and clear a pointer to a buffer object. |
*/ |
extern void ttm_bo_unref(struct ttm_buffer_object **bo); |
/** |
* ttm_bo_synccpu_write_grab |
* |
* @bo: The buffer object: |
* @no_wait: Return immediately if buffer is busy. |
* |
* Synchronizes a buffer object for CPU RW access. This means |
* blocking command submission that affects the buffer and |
* waiting for buffer idle. This lock is recursive. |
* Returns |
* -EBUSY if the buffer is busy and no_wait is true. |
* -ERESTARTSYS if interrupted by a signal. |
*/ |
extern int |
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); |
/** |
* ttm_bo_synccpu_write_release: |
* |
* @bo : The buffer object. |
* |
* Releases a synccpu lock. |
*/ |
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); |
/** |
* ttm_bo_init |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
* @bo: Pointer to a ttm_buffer_object to be initialized. |
* @size: Requested size of buffer object. |
* @type: Requested type of buffer object. |
* @flags: Initial placement flags. |
* @page_alignment: Data alignment in pages. |
* @buffer_start: Virtual address of user space data backing a |
* user buffer object. |
* @interruptible: If needing to sleep to wait for GPU resources, |
* sleep interruptible. |
* @persistant_swap_storage: Usually the swap storage is deleted for buffers |
* pinned in physical memory. If this behaviour is not desired, this member |
* holds a pointer to a persistant shmem object. Typically, this would |
* point to the shmem object backing a GEM object if TTM is used to back a |
* GEM user interface. |
* @acc_size: Accounted size for this object. |
* @destroy: Destroy function. Use NULL for kfree(). |
* |
* This function initializes a pre-allocated struct ttm_buffer_object. |
* As this object may be part of a larger structure, this function, |
* together with the @destroy function, |
* enables driver-specific objects derived from a ttm_buffer_object. |
* On successful return, the object kref and list_kref are set to 1. |
* Returns |
* -ENOMEM: Out of memory. |
* -EINVAL: Invalid placement flags. |
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. |
*/ |
extern int ttm_bo_init(struct ttm_bo_device *bdev, |
struct ttm_buffer_object *bo, |
unsigned long size, |
enum ttm_bo_type type, |
struct ttm_placement *placement, |
uint32_t page_alignment, |
unsigned long buffer_start, |
bool interrubtible, |
struct file *persistant_swap_storage, |
size_t acc_size, |
void (*destroy) (struct ttm_buffer_object *)); |
/** |
 * ttm_bo_create
* |
* @bdev: Pointer to a ttm_bo_device struct. |
* @bo: Pointer to a ttm_buffer_object to be initialized. |
* @size: Requested size of buffer object. |
* @type: Requested type of buffer object. |
* @flags: Initial placement flags. |
* @page_alignment: Data alignment in pages. |
* @buffer_start: Virtual address of user space data backing a |
* user buffer object. |
* @interruptible: If needing to sleep while waiting for GPU resources, |
* sleep interruptible. |
* @persistant_swap_storage: Usually the swap storage is deleted for buffers |
* pinned in physical memory. If this behaviour is not desired, this member |
 * holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a |
* GEM user interface. |
* @p_bo: On successful completion *p_bo points to the created object. |
* |
* This function allocates a ttm_buffer_object, and then calls ttm_bo_init |
* on that object. The destroy function is set to kfree(). |
* Returns |
* -ENOMEM: Out of memory. |
* -EINVAL: Invalid placement flags. |
* -ERESTARTSYS: Interrupted by signal while waiting for resources. |
*/ |
extern int ttm_bo_create(struct ttm_bo_device *bdev, |
unsigned long size, |
enum ttm_bo_type type, |
struct ttm_placement *placement, |
uint32_t page_alignment, |
unsigned long buffer_start, |
bool interruptible, |
struct file *persistant_swap_storage, |
struct ttm_buffer_object **p_bo); |
/** |
* ttm_bo_check_placement |
* |
* @bo: the buffer object. |
* @placement: placements |
* |
* Performs minimal validity checking on an intended change of |
* placement flags. |
* Returns |
* -EINVAL: Intended change is invalid or not allowed. |
*/ |
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
struct ttm_placement *placement); |
/** |
* ttm_bo_init_mm |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
* @mem_type: The memory type. |
* @p_size: size managed area in pages. |
* |
* Initialize a manager for a given memory type. |
* Note: if part of driver firstopen, it must be protected from a |
* potentially racing lastclose. |
* Returns: |
* -EINVAL: invalid size or memory type. |
* -ENOMEM: Not enough memory. |
* May also return driver-specified errors. |
*/ |
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, |
unsigned long p_size); |
/** |
* ttm_bo_clean_mm |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
* @mem_type: The memory type. |
* |
* Take down a manager for a given memory type after first walking |
* the LRU list to evict any buffers left alive. |
* |
* Normally, this function is part of lastclose() or unload(), and at that |
* point there shouldn't be any buffers left created by user-space, since |
 * they should have been removed by the file descriptor release() method.
* However, before this function is run, make sure to signal all sync objects, |
* and verify that the delayed delete queue is empty. The driver must also |
* make sure that there are no NO_EVICT buffers present in this memory type |
* when the call is made. |
* |
* If this function is part of a VT switch, the caller must make sure that |
 * there are no applications currently validating buffers before this
* function is called. The caller can do that by first taking the |
* struct ttm_bo_device::ttm_lock in write mode. |
* |
* Returns: |
* -EINVAL: invalid or uninitialized memory type. |
* -EBUSY: There are still buffers left in this memory type. |
*/ |
extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); |
/** |
* ttm_bo_evict_mm |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
* @mem_type: The memory type. |
* |
* Evicts all buffers on the lru list of the memory type. |
* This is normally part of a VT switch or an |
* out-of-memory-space-due-to-fragmentation handler. |
* The caller must make sure that there are no other processes |
* currently validating buffers, and can do that by taking the |
* struct ttm_bo_device::ttm_lock in write mode. |
* |
* Returns: |
* -EINVAL: Invalid or uninitialized memory type. |
* -ERESTARTSYS: The call was interrupted by a signal while waiting to |
* evict a buffer. |
*/ |
extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); |
/** |
* ttm_kmap_obj_virtual |
* |
* @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. |
* @is_iomem: Pointer to an integer that on return indicates 1 if the |
* virtual map is io memory, 0 if normal memory. |
* |
* Returns the virtual address of a buffer object area mapped by ttm_bo_kmap. |
* If *is_iomem is 1 on return, the virtual address points to an io memory area, |
* that should strictly be accessed by the iowriteXX() and similar functions. |
*/ |
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, |
bool *is_iomem) |
{ |
*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); |
return map->virtual; |
} |
/** |
* ttm_bo_kmap |
* |
* @bo: The buffer object. |
* @start_page: The first page to map. |
* @num_pages: Number of pages to map. |
* @map: pointer to a struct ttm_bo_kmap_obj representing the map. |
* |
* Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the |
* data in the buffer object. The ttm_kmap_obj_virtual function can then be |
* used to obtain a virtual address to the data. |
* |
* Returns |
* -ENOMEM: Out of memory. |
* -EINVAL: Invalid range. |
*/ |
extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, |
unsigned long num_pages, struct ttm_bo_kmap_obj *map); |
/** |
* ttm_bo_kunmap |
* |
* @map: Object describing the map to unmap. |
* |
* Unmaps a kernel map set up by ttm_bo_kmap. |
*/ |
extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); |
#if 0 |
#endif |
/** |
* ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. |
* |
* @vma: vma as input from the fbdev mmap method. |
* @bo: The bo backing the address space. The address space will |
* have the same size as the bo, and start at offset 0. |
* |
* This function is intended to be called by the fbdev mmap method |
* if the fbdev address space is to be backed by a bo. |
*/ |
extern int ttm_fbdev_mmap(struct vm_area_struct *vma, |
struct ttm_buffer_object *bo); |
/** |
* ttm_bo_mmap - mmap out of the ttm device address space. |
* |
* @filp: filp as input from the mmap method. |
* @vma: vma as input from the mmap method. |
* @bdev: Pointer to the ttm_bo_device with the address space manager. |
* |
* This function is intended to be called by the device mmap method. |
* if the device address space is to be backed by the bo manager. |
*/ |
extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, |
struct ttm_bo_device *bdev); |
/** |
* ttm_bo_io |
* |
* @bdev: Pointer to the struct ttm_bo_device. |
* @filp: Pointer to the struct file attempting to read / write. |
* @wbuf: User-space pointer to address of buffer to write. NULL on read. |
* @rbuf: User-space pointer to address of buffer to read into. |
* Null on write. |
* @count: Number of bytes to read / write. |
* @f_pos: Pointer to current file position. |
 * @write: 1 for write, 0 for read.
* |
* This function implements read / write into ttm buffer objects, and is |
* intended to |
* be called from the fops::read and fops::write method. |
* Returns: |
* See man (2) write, man(2) read. In particular, |
* the function may return -ERESTARTSYS if |
* interrupted by a signal. |
*/ |
extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, |
const char __user *wbuf, char __user *rbuf, |
size_t count, loff_t *f_pos, bool write); |
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); |
#endif |
/drivers/include/drm/ttm/ttm_bo_driver.h |
---|
0,0 → 1,924 |
/************************************************************************** |
* |
* Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
/* |
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
*/ |
#ifndef _TTM_BO_DRIVER_H_ |
#define _TTM_BO_DRIVER_H_ |
#include "ttm/ttm_bo_api.h" |
#include "ttm/ttm_memory.h" |
#include "ttm/ttm_module.h" |
#include "drm_mm.h" |
#include "linux/spinlock.h" |
struct ttm_backend; |
/**
 * struct ttm_backend_func
 *
 * Driver-supplied operations backing a struct ttm_backend:
 * populate / clear the backend with system pages, and bind / unbind
 * those pages to the aperture location described by a struct ttm_mem_reg.
 */
struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member populate
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @num_pages: Number of pages to populate.
	 * @pages: Array of pointers to ttm pages.
	 * @dummy_read_page: Page to be used instead of NULL pages in the
	 * array @pages.
	 *
	 * Populate the backend with ttm pages. Depending on the backend,
	 * it may or may not copy the @pages array.
	 */
	int (*populate) (struct ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	/**
	 * struct ttm_backend_func member clear
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * This is an "unpopulate" function. Release all resources
	 * allocated with populate.
	 */
	void (*clear) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member bind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture- and system page sizes.
	 */
	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture- and system page sizes.
	 */
	int (*unbind) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Destroy the backend.
	 */
	void (*destroy) (struct ttm_backend *backend);
};
/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */
struct ttm_backend {
	struct ttm_bo_device *bdev;	/* owning buffer object device */
	uint32_t flags;			/* driver-private flags */
	struct ttm_backend_func *func;	/* backend method table */
};
#define TTM_PAGE_FLAG_VMALLOC (1 << 0) |
#define TTM_PAGE_FLAG_USER (1 << 1) |
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) |
#define TTM_PAGE_FLAG_WRITE (1 << 3) |
#define TTM_PAGE_FLAG_SWAPPED (1 << 4) |
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) |
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) |
#define TTM_PAGE_FLAG_DMA32 (1 << 7) |
/* Caching attribute applied to the pages of a struct ttm_tt. */
enum ttm_caching_state {
	tt_uncached,	/* pages mapped uncached */
	tt_wc,		/* pages mapped write-combined */
	tt_cached	/* pages mapped with normal caching */
};
/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @page_flags: TTM_PAGE_FLAG_XX flags describing the pages.
 * @num_pages: Number of pages in the page array.
 * @glob: Pointer to the struct ttm_bo_global.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_tt {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;
	unsigned long num_pages;
	struct ttm_bo_global *glob;
	struct ttm_backend *be;
	struct task_struct *tsk;
	unsigned long start;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,	/* pages are bound to the aperture */
		tt_unbound,	/* pages are populated but not bound */
		tt_unpopulated,	/* no pages are populated */
	} state;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ |
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ |
#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap |
before kernel access. */ |
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ |
/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @io_offset: The io_offset of the first managed page of IO memory or
 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
 * memory, this should be set to 0.
 * @io_size: The size of a managed IO region (fixed memory or aperture).
 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
 * @io_addr should be set to NULL.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */
struct ttm_mem_type_manager {
	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;

	/*
	 * Protected by the bdev->lru_lock.
	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
	 * Plays ill with list removal, though.
	 */

	struct drm_mm manager;
	struct list_head lru;
};
/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */
struct ttm_bo_driver {
	/**
	 * struct ttm_bo_driver member create_ttm_backend_entry
	 *
	 * @bdev: The buffer object device.
	 *
	 * Create a driver specific struct ttm_backend.
	 */
	struct ttm_backend *(*create_ttm_backend_entry)
	 (struct ttm_bo_device *bdev);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */
	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);

	/**
	 * struct ttm_bo_driver member init_mem_type
	 *
	 * @bdev: the buffer object device.
	 * @type: the memory type to initialize.
	 * @man: the struct ttm_mem_type_manager to set up for @type.
	 *
	 * Initialize the memory type manager for memory type @type
	 * (see struct ttm_mem_type_manager above).
	 */
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */
	void(*evict_flags) (struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait, struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver_member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these. I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */
	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
};
/** |
* struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global. |
*/ |
struct ttm_bo_global_ref { |
struct ttm_global_reference ref; |
struct ttm_mem_global *mem_glob; |
}; |
/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 * @bo_count: Buffer object count; atomically updated (internal protection).
 */
struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

//	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	size_t ttm_bo_extra_size;
	size_t ttm_bo_size;
//	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};
#define TTM_NUM_MEM_TYPES 8 |
#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs |
idling before CPU mapping */ |
#define TTM_BO_PRIV_FLAG_MAX 1 |
/** |
* struct ttm_bo_device - Buffer object driver device-specific data. |
* |
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. |
* @man: An array of mem_type_managers. |
* @addr_space_mm: Range manager for the device address space. |
 * @lru_lock: Spinlock that protects the buffer+device lru lists and |
* ddestroy lists. |
* @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. |
* If a GPU lockup has been detected, this is forced to 0. |
* @dev_mapping: A pointer to the struct address_space representing the |
* device address space. |
* @wq: Work queue structure for the delayed delete workqueue. |
* |
*/ |
struct ttm_bo_device { |
/* |
* Constant after bo device init / atomic. |
*/ |
struct list_head device_list; /* link in the global device list */ |
struct ttm_bo_global *glob; /* bo subsystem global data */ |
struct ttm_bo_driver *driver; /* driver callbacks, set up by the driver */ |
rwlock_t vm_lock; /* protects the device address space structures below */ |
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; /* one manager per memory type */ |
/* |
* Protected by the vm lock. |
*/ |
// struct rb_root addr_space_rb; |
struct drm_mm addr_space_mm; /* range manager for the device address space */ |
/* |
* Protected by the global:lru lock. |
*/ |
struct list_head ddestroy; /* buffers pending delayed destruction */ |
/* |
* Protected by load / firstopen / lastclose /unload sync. |
*/ |
bool nice_mode; /* wait nicely for idle when cleaning a manager; forced to 0 on GPU lockup */ |
struct address_space *dev_mapping; /* address_space representing the device address space */ |
/* |
* Internal protection. |
*/ |
// struct delayed_work wq; |
bool need_dma32; /* presumably restricts allocations to 32-bit addressable memory — confirm */ |
}; |
/** |
* ttm_flag_masked |
* |
* @old: Pointer to the result and original value. |
* @new: New value of bits. |
* @mask: Mask of bits to change. |
* |
* Convenience function to change a number of bits identified by a mask. |
*/ |
static inline uint32_t |
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) |
{ |
/* Keep the bits of *old outside @mask, take the bits of @new inside it. */ |
uint32_t merged = (*old & ~mask) | (new & mask); |
*old = merged; |
return merged; |
} |
/** |
* ttm_tt_create |
* |
* @bdev: pointer to a struct ttm_bo_device: |
* @size: Size of the data needed backing. |
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. |
* @dummy_read_page: See struct ttm_bo_device. |
* |
* Create a struct ttm_tt to back data with system memory pages. |
* No pages are actually allocated. |
* Returns: |
* NULL: Out of memory. |
*/ |
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, |
unsigned long size, |
uint32_t page_flags, |
struct page *dummy_read_page); |
/** |
* ttm_tt_set_user: |
* |
* @ttm: The struct ttm_tt to populate. |
* @tsk: A struct task_struct for which @start is a valid user-space address. |
* @start: A valid user-space address. |
* @num_pages: Size in pages of the user memory area. |
* |
* Populate a struct ttm_tt with a user-space memory area after first pinning |
* the pages backing it. |
* Returns: |
* !0: Error. |
*/ |
extern int ttm_tt_set_user(struct ttm_tt *ttm, |
struct task_struct *tsk, |
unsigned long start, unsigned long num_pages); |
/** |
 * ttm_tt_bind: |
* |
* @ttm: The struct ttm_tt containing backing pages. |
* @bo_mem: The struct ttm_mem_reg identifying the binding location. |
* |
* Bind the pages of @ttm to an aperture location identified by @bo_mem |
*/ |
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); |
/** |
* ttm_tt_populate: |
* |
* @ttm: The struct ttm_tt to contain the backing pages. |
* |
* Add backing pages to all of @ttm |
*/ |
extern int ttm_tt_populate(struct ttm_tt *ttm); |
/** |
 * ttm_tt_destroy: |
* |
* @ttm: The struct ttm_tt. |
* |
* Unbind, unpopulate and destroy a struct ttm_tt. |
*/ |
extern void ttm_tt_destroy(struct ttm_tt *ttm); |
/** |
 * ttm_tt_unbind: |
* |
* @ttm: The struct ttm_tt. |
* |
* Unbind a struct ttm_tt. |
*/ |
extern void ttm_tt_unbind(struct ttm_tt *ttm); |
/** |
 * ttm_tt_get_page: |
* |
* @ttm: The struct ttm_tt. |
* @index: Index of the desired page. |
* |
* Return a pointer to the struct page backing @ttm at page |
* index @index. If the page is unpopulated, one will be allocated to |
* populate that index. |
* |
* Returns: |
* NULL on OOM. |
*/ |
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index); |
/** |
* ttm_tt_cache_flush: |
* |
* @pages: An array of pointers to struct page:s to flush. |
* @num_pages: Number of pages to flush. |
* |
* Flush the data of the indicated pages from the cpu caches. |
* This is used when changing caching attributes of the pages from |
* cache-coherent. |
*/ |
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages); |
/** |
* ttm_tt_set_placement_caching: |
* |
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy. |
* @placement: Flag indicating the desired caching policy. |
* |
* This function will change caching policy of any default kernel mappings of |
* the pages backing @ttm. If changing from cached to uncached or |
* write-combined, |
* all CPU caches will first be flushed to make sure the data of the pages |
* hit RAM. This function may be very costly as it involves global TLB |
* and cache flushes and potential page splitting / combining. |
*/ |
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); |
extern int ttm_tt_swapout(struct ttm_tt *ttm, |
struct file *persistant_swap_storage); |
/* |
* ttm_bo.c |
*/ |
/** |
* ttm_mem_reg_is_pci |
* |
* @bdev: Pointer to a struct ttm_bo_device. |
* @mem: A valid struct ttm_mem_reg. |
* |
* Returns true if the memory described by @mem is PCI memory, |
* false otherwise. |
*/ |
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, |
struct ttm_mem_reg *mem); |
/** |
* ttm_bo_mem_space |
* |
* @bo: Pointer to a struct ttm_buffer_object. the data of which |
* we want to allocate space for. |
 * @placement: Proposed new placement for the buffer object. |
* @mem: A struct ttm_mem_reg. |
 * @interruptible: Sleep interruptibly when waiting for space. |
* @no_wait: Don't sleep waiting for space to become available. |
* |
* Allocate memory space for the buffer object pointed to by @bo, using |
* the placement flags in @mem, potentially evicting other idle buffer objects. |
* This function may sleep while waiting for space to become available. |
* Returns: |
* -EBUSY: No space available (only if no_wait == 1). |
* -ENOMEM: Could not allocate memory for the buffer object, either due to |
* fragmentation or concurrent allocators. |
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal. |
*/ |
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
struct ttm_mem_reg *mem, |
bool interruptible, bool no_wait); |
/** |
 * ttm_bo_wait_cpu |
* |
* @bo: Pointer to a struct ttm_buffer_object. |
* @no_wait: Don't sleep while waiting. |
* |
* Wait until a buffer object is no longer sync'ed for CPU access. |
* Returns: |
* -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). |
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal. |
*/ |
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); |
/** |
* ttm_bo_pci_offset - Get the PCI offset for the buffer object memory. |
* |
 * @bdev: Pointer to a struct ttm_bo_device. |
 * @mem: A valid struct ttm_mem_reg to query. |
 * @bus_base: On return the base of the PCI region |
 * @bus_offset: On return the byte offset into the PCI region |
 * @bus_size: On return the byte size of the buffer object or zero if |
 * the buffer object memory is not accessible through a PCI region. |
* |
* Returns: |
* -EINVAL if the buffer object is currently not mappable. |
* 0 otherwise. |
*/ |
extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, |
struct ttm_mem_reg *mem, |
unsigned long *bus_base, |
unsigned long *bus_offset, |
unsigned long *bus_size); |
extern void ttm_bo_global_release(struct ttm_global_reference *ref); |
extern int ttm_bo_global_init(struct ttm_global_reference *ref); |
extern int ttm_bo_device_release(struct ttm_bo_device *bdev); |
/** |
* ttm_bo_device_init |
* |
* @bdev: A pointer to a struct ttm_bo_device to initialize. |
* @mem_global: A pointer to an initialized struct ttm_mem_global. |
* @driver: A pointer to a struct ttm_bo_driver set up by the caller. |
* @file_page_offset: Offset into the device address space that is available |
* for buffer data. This ensures compatibility with other users of the |
* address space. |
* |
* Initializes a struct ttm_bo_device: |
* Returns: |
* !0: Failure. |
*/ |
extern int ttm_bo_device_init(struct ttm_bo_device *bdev, |
struct ttm_bo_global *glob, |
struct ttm_bo_driver *driver, |
uint64_t file_page_offset, bool need_dma32); |
/** |
* ttm_bo_unmap_virtual |
* |
* @bo: tear down the virtual mappings for this BO |
*/ |
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); |
/** |
* ttm_bo_reserve: |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Sleep interruptible if waiting. |
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. |
* @use_sequence: If @bo is already reserved, Only sleep waiting for |
* it to become unreserved if @sequence < (@bo)->sequence. |
* |
* Locks a buffer object for validation. (Or prevents other processes from |
* locking it for validation) and removes it from lru lists, while taking |
* a number of measures to prevent deadlocks. |
* |
* Deadlocks may occur when two processes try to reserve multiple buffers in |
* different order, either by will or as a result of a buffer being evicted |
* to make room for a buffer already reserved. (Buffers are reserved before |
* they are evicted). The following algorithm prevents such deadlocks from |
 * occurring: |
* 1) Buffers are reserved with the lru spinlock held. Upon successful |
* reservation they are removed from the lru list. This stops a reserved buffer |
* from being evicted. However the lru spinlock is released between the time |
* a buffer is selected for eviction and the time it is reserved. |
* Therefore a check is made when a buffer is reserved for eviction, that it |
* is still the first buffer in the lru list, before it is removed from the |
* list. @check_lru == 1 forces this check. If it fails, the function returns |
* -EINVAL, and the caller should then choose a new buffer to evict and repeat |
* the procedure. |
* 2) Processes attempting to reserve multiple buffers other than for eviction, |
* (typically execbuf), should first obtain a unique 32-bit |
* validation sequence number, |
* and call this function with @use_sequence == 1 and @sequence == the unique |
* sequence number. If upon call of this function, the buffer object is already |
* reserved, the validation sequence is checked against the validation |
* sequence of the process currently reserving the buffer, |
* and if the current validation sequence is greater than that of the process |
* holding the reservation, the function returns -EAGAIN. Otherwise it sleeps |
* waiting for the buffer to become unreserved, after which it retries |
* reserving. |
* The caller should, when receiving an -EAGAIN error |
* release all its buffer reservations, wait for @bo to become unreserved, and |
* then rerun the validation with the same validation sequence. This procedure |
* will always guarantee that the process with the lowest validation sequence |
* will eventually succeed, preventing both deadlocks and starvation. |
* |
* Returns: |
* -EAGAIN: The reservation may cause a deadlock. |
* Release all buffer reservations, wait for @bo to become unreserved and |
* try again. (only if use_sequence == 1). |
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
* a signal. Release all buffer reservations and return to user-space. |
*/ |
extern int ttm_bo_reserve(struct ttm_buffer_object *bo, |
bool interruptible, |
bool no_wait, bool use_sequence, uint32_t sequence); |
/** |
* ttm_bo_unreserve |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Unreserve a previous reservation of @bo. |
*/ |
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); |
/** |
* ttm_bo_wait_unreserved |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Wait for a struct ttm_buffer_object to become unreserved. |
* This is typically used in the execbuf code to relax cpu-usage when |
* a potential deadlock condition backoff. |
*/ |
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, |
bool interruptible); |
/** |
* ttm_bo_block_reservation |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Use interruptible sleep when waiting. |
* @no_wait: Don't sleep, but rather return -EBUSY. |
* |
* Block reservation for validation by simply reserving the buffer. |
* This is intended for single buffer use only without eviction, |
* and thus needs no deadlock protection. |
* |
* Returns: |
* -EBUSY: If no_wait == 1 and the buffer is already reserved. |
* -ERESTARTSYS: If interruptible == 1 and the process received a signal |
* while sleeping. |
*/ |
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, |
bool interruptible, bool no_wait); |
/** |
* ttm_bo_unblock_reservation |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Unblocks reservation leaving lru lists untouched. |
*/ |
extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo); |
/* |
* ttm_bo_util.c |
*/ |
/** |
* ttm_bo_move_ttm |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @evict: 1: This is an eviction. Don't try to pipeline. |
* @no_wait: Never sleep, but rather return with -EBUSY. |
* @new_mem: struct ttm_mem_reg indicating where to move. |
* |
* Optimized move function for a buffer object with both old and |
* new placement backed by a TTM. The function will, if successful, |
* free any old aperture space, and set (@new_mem)->mm_node to NULL, |
* and update the (@bo)->mem placement flags. If unsuccessful, the old |
* data remains untouched, and it's up to the caller to free the |
* memory space indicated by @new_mem. |
* Returns: |
* !0: Failure. |
*/ |
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, |
bool evict, bool no_wait, |
struct ttm_mem_reg *new_mem); |
/** |
* ttm_bo_move_memcpy |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @evict: 1: This is an eviction. Don't try to pipeline. |
* @no_wait: Never sleep, but rather return with -EBUSY. |
* @new_mem: struct ttm_mem_reg indicating where to move. |
* |
* Fallback move function for a mappable buffer object in mappable memory. |
* The function will, if successful, |
* free any old aperture space, and set (@new_mem)->mm_node to NULL, |
* and update the (@bo)->mem placement flags. If unsuccessful, the old |
* data remains untouched, and it's up to the caller to free the |
* memory space indicated by @new_mem. |
* Returns: |
* !0: Failure. |
*/ |
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, |
bool evict, |
bool no_wait, struct ttm_mem_reg *new_mem); |
/** |
* ttm_bo_free_old_node |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Utility function to free an old placement after a successful move. |
*/ |
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); |
/** |
* ttm_bo_move_accel_cleanup. |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @sync_obj: A sync object that signals when moving is complete. |
* @sync_obj_arg: An argument to pass to the sync object idle / wait |
* functions. |
* @evict: This is an evict move. Don't return until the buffer is idle. |
* @no_wait: Never sleep, but rather return with -EBUSY. |
* @new_mem: struct ttm_mem_reg indicating where to move. |
* |
* Accelerated move function to be called when an accelerated move |
* has been scheduled. The function will create a new temporary buffer object |
* representing the old placement, and put the sync object on both buffer |
* objects. After that the newly created buffer object is unref'd to be |
* destroyed when the move is complete. This will help pipeline |
* buffer moves. |
*/ |
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
void *sync_obj, |
void *sync_obj_arg, |
bool evict, bool no_wait, |
struct ttm_mem_reg *new_mem); |
/** |
* ttm_io_prot |
* |
* @c_state: Caching state. |
* @tmp: Page protection flag for a normal, cached mapping. |
* |
* Utility function that returns the pgprot_t that should be used for |
* setting up a PTE with the caching model indicated by @c_state. |
*/ |
extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp); |
#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) |
#define TTM_HAS_AGP |
#include <linux/agp_backend.h> |
/** |
* ttm_agp_backend_init |
* |
* @bdev: Pointer to a struct ttm_bo_device. |
* @bridge: The agp bridge this device is sitting on. |
* |
* Create a TTM backend that uses the indicated AGP bridge as an aperture |
* for TT memory. This function uses the linux agpgart interface to |
* bind and unbind memory backing a ttm_tt. |
*/ |
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, |
struct agp_bridge_data *bridge); |
#endif |
#endif |
/drivers/include/drm/ttm/ttm_memory.h |
---|
0,0 → 1,159 |
/************************************************************************** |
* |
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#ifndef TTM_MEMORY_H |
#define TTM_MEMORY_H |
//#include <linux/workqueue.h> |
#include <linux/spinlock.h> |
//#include <linux/wait.h> |
#include <linux/errno.h> |
//#include <linux/kobject.h> |
//#include <linux/mm.h> |
/** |
* struct ttm_mem_shrink - callback to shrink TTM memory usage. |
* |
* @do_shrink: The callback function. |
* |
* Arguments to the do_shrink functions are intended to be passed using |
 * inheritance. That is, the argument class derives from struct ttm_mem_shrink, |
* and can be accessed using container_of(). |
*/ |
struct ttm_mem_shrink { |
int (*do_shrink) (struct ttm_mem_shrink *); /* callback installed via ttm_mem_init_shrink() */ |
}; |
/** |
* struct ttm_mem_global - Global memory accounting structure. |
* |
* @shrink: A single callback to shrink TTM memory usage. Extend this |
* to a linked list to be able to handle multiple callbacks when needed. |
* @swap_queue: A workqueue to handle shrinking in low memory situations. We |
* need a separate workqueue since it will spend a lot of time waiting |
* for the GPU, and this will otherwise block other workqueue tasks(?) |
* At this point we use only a single-threaded workqueue. |
* @work: The workqueue callback for the shrink queue. |
* @queue: Wait queue for processes suspended waiting for memory. |
* @lock: Lock to protect the @shrink - and the memory accounting members, |
* that is, essentially the whole structure with some exceptions. |
* @zones: Array of pointers to accounting zones. |
* @num_zones: Number of populated entries in the @zones array. |
* @zone_kernel: Pointer to the kernel zone. |
* @zone_highmem: Pointer to the highmem zone if there is one. |
* @zone_dma32: Pointer to the dma32 zone if there is one. |
* |
* Note that this structure is not per device. It should be global for all |
* graphics devices. |
*/ |
#define TTM_MEM_MAX_ZONES 2 |
struct ttm_mem_zone; |
struct ttm_mem_global { |
// struct kobject kobj; |
struct ttm_mem_shrink *shrink; /* single shrink callback; protected by @lock */ |
// struct workqueue_struct *swap_queue; |
// struct work_struct work; |
// wait_queue_head_t queue; |
spinlock_t lock; /* protects @shrink and the memory accounting members */ |
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; /* accounting zones */ |
unsigned int num_zones; /* number of populated entries in @zones */ |
struct ttm_mem_zone *zone_kernel; /* the kernel zone */ |
#ifdef CONFIG_HIGHMEM |
struct ttm_mem_zone *zone_highmem; /* highmem zone, if there is one */ |
#else |
struct ttm_mem_zone *zone_dma32; /* dma32 zone, if there is one */ |
#endif |
}; |
/** |
* ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object |
* |
* @shrink: The object to initialize. |
* @func: The callback function. |
*/ |
static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink, |
int (*func) (struct ttm_mem_shrink *)) |
{ |
/* Install the shrink callback into the (not yet registered) object. */ |
shrink->do_shrink = func; |
} |
/** |
* ttm_mem_register_shrink - register a struct ttm_mem_shrink object. |
* |
* @glob: The struct ttm_mem_global object to register with. |
* @shrink: An initialized struct ttm_mem_shrink object to register. |
* |
* Returns: |
* -EBUSY: There's already a callback registered. (May change). |
*/ |
static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, |
struct ttm_mem_shrink *shrink) |
{ |
int ret = 0; |
/* Only a single shrink callback may be registered at a time. */ |
spin_lock(&glob->lock); |
if (glob->shrink != NULL) |
ret = -EBUSY; |
else |
glob->shrink = shrink; |
spin_unlock(&glob->lock); |
return ret; |
} |
/** |
* ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object. |
* |
* @glob: The struct ttm_mem_global object to unregister from. |
 * @shrink: A previously registered struct ttm_mem_shrink object. |
* |
*/ |
static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, |
struct ttm_mem_shrink *shrink) |
{ |
spin_lock(&glob->lock); |
/* The caller must pass the currently registered object. */ |
BUG_ON(glob->shrink != shrink); |
glob->shrink = NULL; |
spin_unlock(&glob->lock); |
} |
extern int ttm_mem_global_init(struct ttm_mem_global *glob); |
extern void ttm_mem_global_release(struct ttm_mem_global *glob); |
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, |
bool no_wait, bool interruptible); |
extern void ttm_mem_global_free(struct ttm_mem_global *glob, |
uint64_t amount); |
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, |
struct page *page, |
bool no_wait, bool interruptible); |
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, |
struct page *page); |
extern size_t ttm_round_pot(size_t size); |
#endif |
/drivers/include/drm/ttm/ttm_module.h |
---|
0,0 → 1,60 |
/************************************************************************** |
* |
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
/* |
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
*/ |
#ifndef _TTM_MODULE_H_ |
#define _TTM_MODULE_H_ |
#include <linux/kernel.h> |
struct kobject; |
#define TTM_PFX "[TTM] " |
/* Identifiers for the per-type global TTM singletons. */ |
enum ttm_global_types { |
TTM_GLOBAL_TTM_MEM = 0, |
TTM_GLOBAL_TTM_BO, |
TTM_GLOBAL_TTM_OBJECT, |
TTM_GLOBAL_NUM /* number of global types; keep last */ |
}; |
/* |
* Descriptor for one global TTM object, used with |
* ttm_global_item_ref() / ttm_global_item_unref(). |
* @global_type: which singleton this refers to. |
* @size: size of the object — presumably the allocation size used on |
* first reference; confirm against ttm_module.c. |
* @object: pointer to the instantiated global object. |
* @init / @release: constructor / destructor callbacks for the object. |
*/ |
struct ttm_global_reference { |
enum ttm_global_types global_type; |
size_t size; |
void *object; |
int (*init) (struct ttm_global_reference *); |
void (*release) (struct ttm_global_reference *); |
}; |
extern void ttm_global_init(void); |
extern void ttm_global_release(void); |
extern int ttm_global_item_ref(struct ttm_global_reference *ref); |
extern void ttm_global_item_unref(struct ttm_global_reference *ref); |
extern struct kobject *ttm_get_kobj(void); |
#endif /* _TTM_MODULE_H_ */ |
/drivers/include/drm/ttm/ttm_placement.h |
---|
0,0 → 1,92 |
/************************************************************************** |
* |
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
/* |
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
*/ |
#ifndef _TTM_PLACEMENT_H_ |
#define _TTM_PLACEMENT_H_ |
/* |
* Memory regions for data placement. |
*/ |
#define TTM_PL_SYSTEM 0 |
#define TTM_PL_TT 1 |
#define TTM_PL_VRAM 2 |
#define TTM_PL_PRIV0 3 |
#define TTM_PL_PRIV1 4 |
#define TTM_PL_PRIV2 5 |
#define TTM_PL_PRIV3 6 |
#define TTM_PL_PRIV4 7 |
#define TTM_PL_PRIV5 8 |
#define TTM_PL_SWAPPED 15 |
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) |
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) |
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) |
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0) |
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1) |
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2) |
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3) |
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4) |
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5) |
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED) |
#define TTM_PL_MASK_MEM 0x0000FFFF |
/* |
* Other flags that affects data placement. |
* TTM_PL_FLAG_CACHED indicates cache-coherent mappings |
* if available. |
* TTM_PL_FLAG_SHARED means that another application may |
* reference the buffer. |
* TTM_PL_FLAG_NO_EVICT means that the buffer may never |
* be evicted to make room for other buffers. |
*/ |
#define TTM_PL_FLAG_CACHED (1 << 16) |
#define TTM_PL_FLAG_UNCACHED (1 << 17) |
#define TTM_PL_FLAG_WC (1 << 18) |
#define TTM_PL_FLAG_SHARED (1 << 20) |
#define TTM_PL_FLAG_NO_EVICT (1 << 21) |
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ |
TTM_PL_FLAG_UNCACHED | \ |
TTM_PL_FLAG_WC) |
#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) |
/* |
* Access flags to be used for CPU- and GPU- mappings. |
* The idea is that the TTM synchronization mechanism will |
* allow concurrent READ access and exclusive write access. |
* Currently GPU- and CPU accesses are exclusive. |
*/ |
#define TTM_ACCESS_READ (1 << 0) |
#define TTM_ACCESS_WRITE (1 << 1) |
#endif |
/drivers/include/errno-base.h |
---|
0,0 → 1,39 |
#ifndef _ASM_GENERIC_ERRNO_BASE_H |
#define _ASM_GENERIC_ERRNO_BASE_H |
#define EPERM 1 /* Operation not permitted */ |
#define ENOENT 2 /* No such file or directory */ |
#define ESRCH 3 /* No such process */ |
#define EINTR 4 /* Interrupted system call */ |
#define EIO 5 /* I/O error */ |
#define ENXIO 6 /* No such device or address */ |
#define E2BIG 7 /* Argument list too long */ |
#define ENOEXEC 8 /* Exec format error */ |
#define EBADF 9 /* Bad file number */ |
#define ECHILD 10 /* No child processes */ |
#define EAGAIN 11 /* Try again */ |
#define ENOMEM 12 /* Out of memory */ |
#define EACCES 13 /* Permission denied */ |
#define EFAULT 14 /* Bad address */ |
#define ENOTBLK 15 /* Block device required */ |
#define EBUSY 16 /* Device or resource busy */ |
#define EEXIST 17 /* File exists */ |
#define EXDEV 18 /* Cross-device link */ |
#define ENODEV 19 /* No such device */ |
#define ENOTDIR 20 /* Not a directory */ |
#define EISDIR 21 /* Is a directory */ |
#define EINVAL 22 /* Invalid argument */ |
#define ENFILE 23 /* File table overflow */ |
#define EMFILE 24 /* Too many open files */ |
#define ENOTTY 25 /* Not a typewriter */ |
#define ETXTBSY 26 /* Text file busy */ |
#define EFBIG 27 /* File too large */ |
#define ENOSPC 28 /* No space left on device */ |
#define ESPIPE 29 /* Illegal seek */ |
#define EROFS 30 /* Read-only file system */ |
#define EMLINK 31 /* Too many links */ |
#define EPIPE 32 /* Broken pipe */ |
#define EDOM 33 /* Math argument out of domain of func */ |
#define ERANGE 34 /* Math result not representable */ |
#endif |
/drivers/include/linux/asm/alternative.h |
---|
0,0 → 1,164 |
#ifndef _ASM_X86_ALTERNATIVE_H |
#define _ASM_X86_ALTERNATIVE_H |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
#include <asm/asm.h> |
/* |
* Alternative inline assembly for SMP. |
* |
* The LOCK_PREFIX macro defined here replaces the LOCK and |
* LOCK_PREFIX macros used everywhere in the source tree. |
* |
* SMP alternatives use the same data structures as the other |
* alternatives and the X86_FEATURE_UP flag to indicate the case of a |
* UP system running a SMP kernel. The existing apply_alternatives() |
* works fine for patching a SMP kernel for UP. |
* |
* The SMP alternative tables can be kept after boot and contain both |
* UP and SMP versions of the instructions to allow switching back to |
* SMP at runtime, when hotplugging in a new CPU, which is especially |
* useful in virtualized environments. |
* |
* The very common lock prefix is handled as special case in a |
* separate table which is a pure address list without replacement ptr |
* and size information. That keeps the table sizes small. |
*/ |
#ifdef CONFIG_SMP |
#define LOCK_PREFIX \ |
".section .smp_locks,\"a\"\n" \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR "661f\n" /* address */ \ |
".previous\n" \ |
"661:\n\tlock; " |
#else /* ! CONFIG_SMP */ |
#define LOCK_PREFIX "" |
#endif |
/* This must be included *after* the definition of LOCK_PREFIX */ |
#include <asm/cpufeature.h> |
struct alt_instr { |
u8 *instr; /* original instruction */ |
u8 *replacement; /* replacement instruction sequence */ |
u8 cpuid; /* cpuid bit set for replacement */ |
u8 instrlen; /* length of original instruction */ |
u8 replacementlen; /* length of new instruction, <= instrlen */ |
u8 pad1; /* explicit padding */ |
#ifdef CONFIG_X86_64 |
u32 pad2; /* alignment padding on 64-bit */ |
#endif |
}; |
extern void alternative_instructions(void); |
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); |
struct module; |
#ifdef CONFIG_SMP |
extern void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end); |
extern void alternatives_smp_module_del(struct module *mod); |
extern void alternatives_smp_switch(int smp); |
#else |
static inline void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end) {} |
static inline void alternatives_smp_module_del(struct module *mod) {} |
static inline void alternatives_smp_switch(int smp) {} |
#endif /* CONFIG_SMP */ |
/* alternative assembly primitive: */
/*
 * Emits the original instructions at label 661, an .altinstructions table
 * entry describing them, and the replacement instructions (labels 663/664)
 * in .altinstr_replacement.  The "0xff + ..." .byte deliberately overflows
 * at assembly time when the replacement is longer than the original,
 * turning the "rlen <= slen" rule into a build-time check.
 */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
\
"661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661b\n" /* label */ \
_ASM_PTR "663f\n" /* new instruction */ \
" .byte " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous"
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows to use optimized instructions even on generic binary
 * kernels.
 *
 * length of oldinstr must be longer or equal the length of newinstr
 * It can be padded with nops as needed.
 *
 * For non barrier like inlines please define new variants
 * without volatile and memory clobber.
 */
/* Statement form: no inputs/outputs, full memory clobber (barrier). */
#define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
/*
 * Alternative inline assembly with input.
 *
 * Pecularities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 * Leaving an unused argument 0 to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: output : "i" (0), ## input)
/*
 * use this macro(s) if you need more than one output parameter
 * in alternative_io
 */
/* Wraps two comma-separated operands so they survive macro argument splitting. */
#define ASM_OUTPUT2(a, b) a, b
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end);
#else
/* Paravirt disabled: patching is a no-op and the section bounds are NULL. */
static inline void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{}
#define __parainstructions NULL
#define __parainstructions_end NULL
#endif
/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata.
 * On the local CPU you need to be protected again NMI or MCE handlers seeing an
 * inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
#endif /* _ASM_X86_ALTERNATIVE_H */
/drivers/include/linux/asm/asm.h |
---|
0,0 → 1,55 |
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
/*
 * Helpers usable from both .S files and C inline asm: __ASM_FORM yields
 * the bare token in assembly but a quoted string fragment in C.
 */
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif
/* Select the 32-bit (a) or 64-bit (b) spelling of an operand/mnemonic. */
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
#endif
#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
#define _ASM_SP __ASM_REG(sp)
#define _ASM_BP __ASM_REG(bp)
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
/* Records a (faulting insn, fixup) address pair in __ex_table. */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC ; \
_ASM_ALIGN ; \
_ASM_PTR from , to ; \
.previous
#else
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC \
_ASM_ALIGN "\n" \
_ASM_PTR #from "," #to "\n" \
" .previous\n"
#endif
#endif /* _ASM_X86_ASM_H */
/drivers/include/linux/asm/atomic.h |
---|
0,0 → 1,5 |
/* Dispatch to the word-size-specific atomic implementation. */
#ifdef CONFIG_X86_32
# include "atomic_32.h"
#else
# include "atomic_64.h"
#endif
/drivers/include/linux/asm/atomic_32.h |
---|
0,0 → 1,415 |
#ifndef _ASM_X86_ATOMIC_32_H
#define _ASM_X86_ATOMIC_32_H
#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/processor.h>
#include <asm/cmpxchg.h>
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */
#define ATOMIC_INIT(i) { (i) }
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read(const atomic_t *v)
{
/* A naturally aligned 32-bit load is atomic on x86; no lock needed. */
return v->counter;
}
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic_set(atomic_t *v, int i)
{
/* Likewise, an aligned 32-bit store is atomic on x86. */
v->counter = i;
}
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
/* LOCK_PREFIX makes the read-modify-write atomic on SMP. */
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
/* sete captures ZF from the locked subl, so test and sub are one op. */
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
/* sete reads ZF produced by the locked decl. */
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "incl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
/* sets captures SF (sign flag) from the locked addl. */
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
/* 80386 has no xadd instruction; fall back to an irq-off emulation. */
if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
/* xadd leaves the old value of v->counter in i. */
asm volatile(LOCK_PREFIX "xaddl %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
return i + __i; /* old value + delta = new value */
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
local_irq_restore(flags);
return i + __i;
#endif
}
/**
 * atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
/* Subtraction is just addition of the negated delta. */
return atomic_add_return(-i, v);
}
/* Compare-and-swap on the counter; returns the value observed before the op. */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
/* Unconditionally exchange the counter; returns the previous value. */
static inline int atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
}
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
for (;;) { |
if (unlikely(c == (u))) |
break; |
old = atomic_cmpxchg((v), c, c + (a)); |
if (likely(old == c)) |
break; |
c = old; |
} |
return c != (u); |
} |
/* Convenience wrappers built on the primitives above. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
/* An 64bit atomic type */
typedef struct {
/* 8-byte alignment is required for the cmpxchg8b-based implementation. */
u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
/* Out-of-line 64-bit primitives (implemented elsewhere in the tree). */
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
/**
 * atomic64_xchg - xchg atomic64 variable
 * @ptr: pointer to type atomic64_t
 * @new_val: value to assign
 *
 * Atomically xchgs the value of @ptr to @new_val and returns
 * the old value.
 */
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
/**
 * atomic64_set - set atomic64 variable
 * @ptr: pointer to type atomic64_t
 * @new_val: value to assign
 *
 * Atomically sets the value of @ptr to @new_val.
 */
extern void atomic64_set(atomic64_t *ptr, u64 new_val);
/** |
* atomic64_read - read atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically reads the value of @ptr and returns it. |
*/ |
static inline u64 atomic64_read(atomic64_t *ptr) |
{ |
u64 res; |
/* |
* Note, we inline this atomic64_t primitive because |
* it only clobbers EAX/EDX and leaves the others |
* untouched. We also (somewhat subtly) rely on the |
* fact that cmpxchg8b returns the current 64-bit value |
* of the memory location we are touching: |
*/ |
asm volatile( |
"mov %%ebx, %%eax\n\t" |
"mov %%ecx, %%edx\n\t" |
LOCK_PREFIX "cmpxchg8b %1\n" |
: "=&A" (res) |
: "m" (*ptr) |
); |
return res; |
} |
extern u64 atomic64_read(atomic64_t *ptr); |
/**
 * atomic64_add_return - add and return
 * @delta: integer value to add
 * @ptr: pointer to type atomic64_t
 *
 * Atomically adds @delta to @ptr and returns @delta + *@ptr
 */
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
/*
 * Other variants with different arithmetic operators:
 */
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
extern u64 atomic64_inc_return(atomic64_t *ptr);
extern u64 atomic64_dec_return(atomic64_t *ptr);
/**
 * atomic64_add - add integer to atomic64 variable
 * @delta: integer value to add
 * @ptr: pointer to type atomic64_t
 *
 * Atomically adds @delta to @ptr.
 */
extern void atomic64_add(u64 delta, atomic64_t *ptr);
/**
 * atomic64_sub - subtract the atomic64 variable
 * @delta: integer value to subtract
 * @ptr: pointer to type atomic64_t
 *
 * Atomically subtracts @delta from @ptr.
 */
extern void atomic64_sub(u64 delta, atomic64_t *ptr);
/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @delta: integer value to subtract
 * @ptr: pointer to type atomic64_t
 *
 * Atomically subtracts @delta from @ptr and returns
 * true if the result is zero, or false for all
 * other cases.
 */
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
/**
 * atomic64_inc - increment atomic64 variable
 * @ptr: pointer to type atomic64_t
 *
 * Atomically increments @ptr by 1.
 */
extern void atomic64_inc(atomic64_t *ptr);
/**
 * atomic64_dec - decrement atomic64 variable
 * @ptr: pointer to type atomic64_t
 *
 * Atomically decrements @ptr by 1.
 */
extern void atomic64_dec(atomic64_t *ptr);
/**
 * atomic64_dec_and_test - decrement and test
 * @ptr: pointer to type atomic64_t
 *
 * Atomically decrements @ptr by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
extern int atomic64_dec_and_test(atomic64_t *ptr);
/**
 * atomic64_inc_and_test - increment and test
 * @ptr: pointer to type atomic64_t
 *
 * Atomically increments @ptr by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
extern int atomic64_inc_and_test(atomic64_t *ptr);
/**
 * atomic64_add_negative - add and test if negative
 * @delta: integer value to add
 * @ptr: pointer to type atomic64_t
 *
 * Atomically adds @delta to @ptr and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
#include <asm-generic/atomic-long.h>
#endif /* _ASM_X86_ATOMIC_32_H */
/drivers/include/linux/asm/bitops.h |
---|
0,0 → 1,465 |
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/alternative.h>
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
#define ADDR BITOP_ADDR(addr)
/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
/* Byte that contains bit nr, and the mask of nr within that byte. */
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
/* Constant nr: a locked byte-wide "orb" is shorter than "bts". */
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr))
: "memory");
} else {
asm volatile(LOCK_PREFIX "bts %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
}
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
/* Constant nr: locked byte "andb" with inverted mask; else "btr". */
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btr %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
/* The compiler barrier keeps prior accesses before the unlocking clear. */
barrier();
clear_bit(nr, addr);
}
/* Non-atomic variant of clear_bit(). */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
/* Compiler barriers only: x86 atomics are already serializing. */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
/* Constant nr: locked byte "xorb"; otherwise locked "btc". */
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btc %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
/* "sbb %0,%0" turns CF (the old bit) into 0 or -1 in oldbit. */
asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
"sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm("bts %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
/* btr sets CF to the old bit; sbb materialises it as 0 or -1. */
asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
/* btc toggles the bit and leaves its old value in CF. */
asm volatile("btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr) : "memory");
return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
{ |
return ((1UL << (nr % BITS_PER_LONG)) & |
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; |
} |
/* Runtime-nr variant: "bt" puts the tested bit in CF, sbb expands it. */
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
int oldbit;
asm volatile("bt %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
/* Dispatch on whether @nr is a compile-time constant. */
#define test_bit(nr, addr) \
(__builtin_constant_p((nr)) \
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
/* bsf returns the index of the lowest set bit (undefined for 0). */
asm("bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
/* First zero of word == first set bit of ~word. */
asm("bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
/* bsfl sets ZF when x==0; cmovzl then substitutes -1 so r+1 == 0. */
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=r" (r) : "rm" (x), "r" (-1));
#else
/* No cmov: branch around an explicit -1 load when x==0. */
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
/* "=&r" (early clobber) keeps r from aliasing the "rm" inputs. */
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
#else
asm("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
#endif /* __KERNEL__ */
#undef ADDR
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#define ARCH_HAS_FAST_MULTIPLIER 1
#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
/* ext2/minix bitmap helpers map straight onto the atomic bitops above. */
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr), (unsigned long *)(addr))
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */
/drivers/include/linux/asm/bitsperlong.h |
---|
0,0 → 1,13 |
#ifndef __ASM_X86_BITSPERLONG_H |
#define __ASM_X86_BITSPERLONG_H |
#ifdef __x86_64__ |
# define __BITS_PER_LONG 64 |
#else |
# define __BITS_PER_LONG 32 |
#endif |
#include <asm-generic/bitsperlong.h> |
#endif /* __ASM_X86_BITSPERLONG_H */ |
/drivers/include/linux/asm/byteorder.h |
---|
0,0 → 1,6 |
#ifndef _ASM_X86_BYTEORDER_H |
#define _ASM_X86_BYTEORDER_H |
#include <linux/byteorder/little_endian.h> |
#endif /* _ASM_X86_BYTEORDER_H */ |
/drivers/include/linux/asm/cmpxchg.h |
---|
0,0 → 1,5 |
/* Dispatch to the word-size-specific cmpxchg implementation. */
#ifdef CONFIG_X86_32
# include "cmpxchg_32.h"
#else
# include "cmpxchg_64.h"
#endif
/drivers/include/linux/asm/cmpxchg_32.h |
---|
0,0 → 1,274 |
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H
#include <linux/bitops.h> /* for LOCK_PREFIX */
/*
 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
 * you need to test for the feature in boot_cpu_data.
 */
/* Deliberately undefined: referencing it is a link-time size-check error. */
extern void __xchg_wrong_size(void);
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
/* Oversized dummy so the "m" constraint covers the whole object. */
struct __xchg_dummy {
unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
/* Size-dispatched exchange; unsupported sizes fail at link time. */
#define __xchg(x, ptr, size) \
({ \
__typeof(*(ptr)) __x = (x); \
switch (size) { \
case 1: \
asm volatile("xchgb %b0,%1" \
: "=q" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
case 2: \
asm volatile("xchgw %w0,%1" \
: "=r" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
case 4: \
asm volatile("xchgl %0,%1" \
: "=r" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
default: \
__xchg_wrong_size(); \
} \
__x; \
})
#define xchg(ptr, v) \
__xchg((v), (ptr), sizeof(*ptr))
/*
 * The semantics of XCHGCMP8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
unsigned int low, unsigned int high)
{
/* Retry until cmpxchg8b stores high:low (EBX:ECX inputs) into *ptr. */
asm volatile("\n1:\t"
"movl (%0), %%eax\n\t"
"movl 4(%0), %%edx\n\t"
LOCK_PREFIX "cmpxchg8b (%0)\n\t"
"jnz 1b"
: /* no outputs */
: "D"(ptr),
"b"(low),
"c"(high)
: "ax", "dx", "memory");
}
static inline void __set_64bit_constant(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}
/* Access the low/high 32-bit halves of a 64-bit lvalue in place. */
#define ll_low(x) *(((unsigned int *)&(x)) + 0)
#define ll_high(x) *(((unsigned int *)&(x)) + 1)
static inline void __set_64bit_var(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, ll_low(value), ll_high(value));
}
#define set_64bit(ptr, value) \
(__builtin_constant_p((value)) \
? __set_64bit_constant((ptr), (value)) \
: __set_64bit_var((ptr), (value)))
#define _set_64bit(ptr, value) \
(__builtin_constant_p(value) \
? __set_64bit(ptr, (unsigned int)(value), \
(unsigned int)((value) >> 32)) \
: __set_64bit(ptr, ll_low((value)), ll_high((value))))
/* Deliberately undefined: referencing it is a link-time size-check error. */
extern void __cmpxchg_wrong_size(void);
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
/* @lock is the instruction prefix: LOCK_PREFIX, "lock; " or "" (local). */
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case 1: \
asm volatile(lock "cmpxchgb %b1,%2" \
: "=a"(__ret) \
: "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 2: \
asm volatile(lock "cmpxchgw %w1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 4: \
asm volatile(lock "cmpxchgl %1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_CMPXCHG |
#define __HAVE_ARCH_CMPXCHG 1 |
#define cmpxchg(ptr, old, new) \ |
__cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
#define sync_cmpxchg(ptr, old, new) \ |
__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
#define cmpxchg_local(ptr, old, new) \ |
__cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
#define cmpxchg64(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#define cmpxchg64_local(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#endif |
/*
 * 64-bit compare-and-exchange via CMPXCHG8B, SMP-safe (LOCK_PREFIX).
 * "=A"/"0" place `old` in edx:eax and return the previous *ptr there;
 * "b"/"c" supply the replacement in ecx:ebx.  The store happens only
 * when the previous value equals `old`; callers compare the return
 * value with `old` to detect success.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile(LOCK_PREFIX "cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
/*
 * Same operation as __cmpxchg64() but without the lock prefix, so it is
 * not guaranteed atomic with respect to other processors — intended for
 * cpu-local data.
 */
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile("cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be
 * necessary to simulate the cmpxchg on the 80386 CPU.  For that
 * purpose we define a function for each of the sizes we support.
 */
/* Fallback entry points: both operands are widened to unsigned long and
 * dispatched on sizeof(*(ptr)) by __cmpxchg()/__cmpxchg_local(). */
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#endif
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on 80386 and 80486.  It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */
/* Out-of-line software emulation used when the CPU lacks cmpxchg8b. */
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
/* Selects between "call cmpxchg8b_emu" and the real locked cmpxchg8b
 * depending on X86_FEATURE_CX8 (via alternative_io); ESI holds the
 * target pointer in both variants, edx:eax the old/returned value. */
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
alternative_io("call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
"S" ((ptr)), "0" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
__ret; })
/* Run-time family check: real cmpxchg8b on family > 4 (586+), else the
 * software emulator. */
#define cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#endif
#endif /* _ASM_X86_CMPXCHG_32_H */ |
/drivers/include/linux/asm/cpufeature.h |
---|
0,0 → 1,283 |
/* |
* Defines x86 CPU feature bits |
*/ |
#ifndef _ASM_X86_CPUFEATURE_H |
#define _ASM_X86_CPUFEATURE_H |
#include <asm/required-features.h> |
#define NCAPINTS 9 /* N 32-bit words worth of info */ |
/* |
* Note: If the comment begins with a quoted string, that string is used |
* in /proc/cpuinfo instead of the macro name. If the string is "", |
* this feature bit is not displayed in /proc/cpuinfo at all. |
*/ |
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ |
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ |
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ |
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ |
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
/* (plus FCMOVcc, FCOMI with FPU) */ |
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
#define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ |
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
/* Don't duplicate feature flags which are redundant with Intel! */ |
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ |
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ |
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ |
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ |
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ |
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ |
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ |
/* Other features, Linux-defined mapping, word 3 */ |
/* This range is used for feature bits which conflict or are synthesized */ |
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ |
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ |
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
/* cpu types for specific tunings: */ |
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ |
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ |
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ |
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ |
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ |
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ |
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ |
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ |
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ |
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ |
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ |
#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ |
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ |
#define X86_FEATURE_CID (4*32+10) /* Context ID */ |
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ |
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ |
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ |
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ |
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ |
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ |
#define X86_FEATURE_AES (4*32+25) /* AES instructions */ |
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ |
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ |
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ |
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ |
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ |
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ |
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ |
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ |
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ |
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ |
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ |
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ |
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
/* |
* Auxiliary flags: Linux defined - For features scattered in various |
* CPUID levels like 0x6, 0xA etc |
*/ |
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ |
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ |
/* Virtualization flags: Linux defined */ |
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ |
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ |
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ |
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ |
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <linux/bitops.h>
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
/* Run-time test of one feature bit in a cpuinfo's capability bitmap. */
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
/*
 * cpu_has(c, bit): if `bit` is a compile-time constant and the feature
 * is in the build's REQUIRED_MASKn (guaranteed present on any CPU this
 * kernel boots on), fold to constant 1; otherwise fall back to the
 * run-time bitmap test.  bit>>5 selects the 32-bit capability word,
 * bit&31 the position within it.
 */
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
test_cpu_cap(c, bit))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
/* Set/clear one capability bit in a cpuinfo's bitmap. */
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
/* Also record the override in cpu_caps_cleared/cpu_caps_set so it can
 * be reapplied to CPUs probed later. */
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) |
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) |
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) |
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) |
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) |
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) |
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) |
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) |
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) |
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) |
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) |
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) |
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) |
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) |
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) |
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) |
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) |
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) |
#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) |
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) |
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) |
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) |
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) |
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) |
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) |
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) |
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) |
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) |
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) |
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) |
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) |
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
# define cpu_has_invlpg 1 |
#else |
# define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
#endif |
#ifdef CONFIG_X86_64 |
#undef cpu_has_vme |
#define cpu_has_vme 0 |
#undef cpu_has_pae |
#define cpu_has_pae ___BUG___ |
#undef cpu_has_mp |
#define cpu_has_mp 1 |
#undef cpu_has_k6_mtrr |
#define cpu_has_k6_mtrr 0 |
#undef cpu_has_cyrix_arr |
#define cpu_has_cyrix_arr 0 |
#undef cpu_has_centaur_mcr |
#define cpu_has_centaur_mcr 0 |
#endif /* CONFIG_X86_64 */ |
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
#endif /* _ASM_X86_CPUFEATURE_H */ |
/drivers/include/linux/asm/posix_types.h |
---|
0,0 → 1,13 |
#ifdef __KERNEL__ |
# ifdef CONFIG_X86_32 |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#else |
# ifdef __i386__ |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#endif |
/drivers/include/linux/asm/posix_types_32.h |
---|
0,0 → 1,85 |
#ifndef _ASM_X86_POSIX_TYPES_32_H |
#define _ASM_X86_POSIX_TYPES_32_H |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. Also, we cannot |
* assume GCC is being used. |
*/ |
typedef unsigned long __kernel_ino_t; |
typedef unsigned short __kernel_mode_t; |
typedef unsigned short __kernel_nlink_t; |
typedef long __kernel_off_t; |
typedef int __kernel_pid_t; |
typedef unsigned short __kernel_ipc_pid_t; |
typedef unsigned short __kernel_uid_t; |
typedef unsigned short __kernel_gid_t; |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
typedef long __kernel_time_t; |
typedef long __kernel_suseconds_t; |
typedef long __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef int __kernel_daddr_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
typedef unsigned short __kernel_old_uid_t; |
typedef unsigned short __kernel_old_gid_t; |
typedef unsigned short __kernel_old_dev_t; |
#ifdef __GNUC__ |
typedef long long __kernel_loff_t; |
#endif |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#if defined(__KERNEL__)
/* Kernel-side fd_set primitives implemented with x86 bit instructions. */
#undef __FD_SET
/* Set bit `fd` in the fd_set (btsl). */
#define __FD_SET(fd,fdsetp) \
asm volatile("btsl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int)(fd)))
#undef __FD_CLR
/* Clear bit `fd` in the fd_set (btrl). */
#define __FD_CLR(fd,fdsetp) \
asm volatile("btrl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int) (fd)))
#undef __FD_ISSET
/* Test bit `fd`: btl copies it to CF, setb materialises CF as 0/1. */
#define __FD_ISSET(fd,fdsetp) \
(__extension__ \
({ \
unsigned char __result; \
asm volatile("btl %1,%2 ; setb %0" \
: "=q" (__result) \
: "r" ((int)(fd)), \
"m" (*(__kernel_fd_set *)(fdsetp))); \
__result; \
})) 
#undef __FD_ZERO
/* Zero the whole fd_set: rep stosl writes __FDSET_LONGS zero dwords. */
#define __FD_ZERO(fdsetp) \
do { \
int __d0, __d1; \
asm volatile("cld ; rep ; stosl" \
: "=m" (*(__kernel_fd_set *)(fdsetp)), \
"=&c" (__d0), "=&D" (__d1) \
: "a" (0), "1" (__FDSET_LONGS), \
"2" ((__kernel_fd_set *)(fdsetp)) \
: "memory"); \
} while (0)
#endif /* defined(__KERNEL__) */
#endif /* _ASM_X86_POSIX_TYPES_32_H */ |
/drivers/include/linux/asm/required-features.h |
---|
0,0 → 1,88 |
#ifndef _ASM_X86_REQUIRED_FEATURES_H |
#define _ASM_X86_REQUIRED_FEATURES_H |
/* Define minimum CPUID feature set for kernel These bits are checked |
really early to actually display a visible error message before the |
kernel dies. Make sure to assign features to the proper mask! |
Some requirements that are not in CPUID yet are also in the |
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. |
The real information is in arch/x86/Kconfig.cpu, this just converts |
the CONFIGs into a bitmask */ |
#ifndef CONFIG_MATH_EMULATION |
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) |
#else |
# define NEED_FPU 0 |
#endif |
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) |
#else |
# define NEED_PAE 0 |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) |
#else |
# define NEED_CX8 0 |
#endif |
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) |
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) |
#else |
# define NEED_CMOV 0 |
#endif |
#ifdef CONFIG_X86_USE_3DNOW |
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) |
#else |
# define NEED_3DNOW 0 |
#endif |
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) |
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) |
#else |
# define NEED_NOPL 0 |
#endif |
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
#else
/*
 * Fixed: the `& 31` must apply to the feature bit number *inside* the
 * shift, matching every other NEED_* macro in this header.  The old
 * form (1<<(X86_FEATURE_PGE) & 31) evaluated to (1<<13) & 31 == 0,
 * silently dropping the PGE requirement from REQUIRED_MASK0.  (PSE
 * only produced the right value by accident: (1<<3) & 31 == 1<<3.)
 */
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
#endif
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
#define NEED_LM (1<<(X86_FEATURE_LM & 31))
#else
/* 32-bit kernels impose none of these requirements. */
#define NEED_PSE 0
#define NEED_MSR 0
#define NEED_PGE 0
#define NEED_FXSR 0
#define NEED_XMM 0
#define NEED_XMM2 0
#define NEED_LM 0
#endif
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ |
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ |
NEED_XMM|NEED_XMM2) |
#define SSE_MASK (NEED_XMM|NEED_XMM2) |
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
#define REQUIRED_MASK2 0 |
#define REQUIRED_MASK3 (NEED_NOPL) |
#define REQUIRED_MASK4 0 |
#define REQUIRED_MASK5 0 |
#define REQUIRED_MASK6 0 |
#define REQUIRED_MASK7 0 |
#endif /* _ASM_X86_REQUIRED_FEATURES_H */ |
/drivers/include/linux/asm/spinlock_types.h |
---|
0,0 → 1,20 |
#ifndef _ASM_X86_SPINLOCK_TYPES_H |
#define _ASM_X86_SPINLOCK_TYPES_H |
#ifndef __LINUX_SPINLOCK_TYPES_H |
# error "please don't include this file directly" |
#endif |
typedef struct raw_spinlock { |
unsigned int slock; |
} raw_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
typedef struct { |
unsigned int lock; |
} raw_rwlock_t; |
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
#endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
/drivers/include/linux/asm/string.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include "string_32.h" |
#else |
# include "string_64.h" |
#endif |
/drivers/include/linux/asm/string_32.h |
---|
0,0 → 1,342 |
#ifndef _ASM_X86_STRING_32_H |
#define _ASM_X86_STRING_32_H |
#ifdef __KERNEL__ |
/* Let gcc decide whether to inline or use the out of line functions */ |
#define __HAVE_ARCH_STRCPY |
extern char *strcpy(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCPY |
extern char *strncpy(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCAT |
extern char *strcat(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCAT |
extern char *strncat(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCMP |
extern int strcmp(const char *cs, const char *ct); |
#define __HAVE_ARCH_STRNCMP |
extern int strncmp(const char *cs, const char *ct, size_t count); |
#define __HAVE_ARCH_STRCHR |
extern char *strchr(const char *s, int c); |
#define __HAVE_ARCH_STRLEN |
extern size_t strlen(const char *s); |
/*
 * Copy n bytes from `from` to `to`: rep movsl moves n/4 dwords, then
 * ecx is reloaded with n & 3 and rep movsb finishes the tail (skipped
 * when the remainder is zero).  d0/d1/d2 only absorb the clobbered
 * ecx/edi/esi.  Returns `to`.
 */
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
asm volatile("rep ; movsl\n\t"
"movl %4,%%ecx\n\t"
"andl $3,%%ecx\n\t"
"jz 1f\n\t"
"rep ; movsb\n\t"
"1:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
: "memory");
return to;
}
/* |
* This looks ugly, but the compiler can optimize it totally, |
* as the count is constant. |
*/ |
/*
 * memcpy for a compile-time-constant size n.  Sizes 0-6 and 8 become
 * one or two direct stores; anything else is copied with movsl — a rep
 * loop for >= 20 bytes, an unrolled sequence (each `if (n >= ...)` is
 * folded away at compile time) for smaller blocks — followed by a
 * constant-selected movsb/movsw tail for n % 4.  Returns `to`.
 */
static __always_inline void *__constant_memcpy(void *to, const void *from,
size_t n)
{
long esi, edi;
if (!n)
return to;
switch (n) {
case 1:
*(char *)to = *(char *)from;
return to;
case 2:
*(short *)to = *(short *)from;
return to;
case 4:
*(int *)to = *(int *)from;
return to;
case 3:
*(short *)to = *(short *)from;
*((char *)to + 2) = *((char *)from + 2);
return to;
case 5:
*(int *)to = *(int *)from;
*((char *)to + 4) = *((char *)from + 4);
return to;
case 6:
*(int *)to = *(int *)from;
*((short *)to + 2) = *((short *)from + 2);
return to;
case 8:
*(int *)to = *(int *)from;
*((int *)to + 1) = *((int *)from + 1);
return to;
}
esi = (long)from;
edi = (long)to;
if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
asm volatile("rep ; movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
: "0" (n / 4), "1" (edi), "2" (esi)
: "memory"
);
} else {
/* small block: don't clobber ecx + smaller code */
/* up to four single movsl insns; esi/edi are threaded from one
 * asm to the next through the "0"/"1" tied operands */
if (n >= 4 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 3 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 2 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 1 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
}
switch (n % 4) {
/* tail */
case 0:
return to;
case 1:
asm volatile("movsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
case 2:
asm volatile("movsw"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
default:
asm volatile("movsw\n\tmovsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
}
}
#define __HAVE_ARCH_MEMCPY |
#ifdef CONFIG_X86_USE_3DNOW |
#include <asm/mmx.h> |
/* |
* This CPU favours 3DNow strongly (eg AMD Athlon) |
*/ |
/*
 * Constant-size memcpy on 3DNow-preferring CPUs: small copies stay on
 * the inlined integer path, 512 bytes and up go to the MMX copy.
 */
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
	return (len < 512) ? __constant_memcpy(to, from, len)
			   : _mmx_memcpy(to, from, len);
}
/*
 * Variable-size memcpy on 3DNow-preferring CPUs: integer path below
 * 512 bytes, MMX copy otherwise.
 */
static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
	return (len < 512) ? __memcpy(to, from, len)
			   : _mmx_memcpy(to, from, len);
}
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy3d((t), (f), (n)) \ |
: __memcpy3d((t), (f), (n))) |
#else |
/* |
* No 3D Now! |
*/ |
#ifndef CONFIG_KMEMCHECK |
#if (__GNUC__ >= 4) |
#define memcpy(t, f, n) __builtin_memcpy(t, f, n) |
#else |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy((t), (f), (n)) \ |
: __memcpy((t), (f), (n))) |
#endif |
#else |
/* |
* kmemcheck becomes very happy if we use the REP instructions unconditionally, |
* because it means that we know both memory operands in advance. |
*/ |
#define memcpy(t, f, n) __memcpy((t), (f), (n)) |
#endif |
#endif |
#define __HAVE_ARCH_MEMMOVE |
void *memmove(void *dest, const void *src, size_t n); |
#define memcmp __builtin_memcmp |
#define __HAVE_ARCH_MEMCHR |
extern void *memchr(const void *cs, int c, size_t count); |
/*
 * Fill `count` bytes at `s` with byte `c` using rep stosb.  d0/d1 only
 * absorb the clobbered ecx/edi.  Returns `s`.
 */
static inline void *__memset_generic(void *s, char c, size_t count)
{
int d0, d1;
asm volatile("rep\n\t"
"stosb"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "1" (s), "0" (count)
: "memory");
return s;
}
/* we might want to write optimized versions of these later */ |
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) |
/* |
* memset(x, 0, y) is a reasonably common thing to do, so we want to fill |
* things 32 bits at a time even when we don't know the size of the |
* area at compile-time.. |
*/ |
/*
 * memset with a pre-replicated 32-bit fill pattern `c` (the caller
 * passes 0x01010101UL * byte): rep stosl stores count/4 dwords, then
 * bit 1 and bit 0 of `count` (tested via %b3) select the trailing
 * stosw and/or stosb.  Returns `s`.
 */
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
int d0, d1;
asm volatile("rep ; stosl\n\t"
"testb $2,%b3\n\t"
"je 1f\n\t"
"stosw\n"
"1:\ttestb $1,%b3\n\t"
"je 2f\n\t"
"stosb\n"
"2:"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
: "memory");
return s;
}
/* Added by Gertjan van Wingerde to make minix and sysv module work */ |
#define __HAVE_ARCH_STRNLEN |
extern size_t strnlen(const char *s, size_t count); |
/* end of additional stuff */ |
#define __HAVE_ARCH_STRSTR |
extern char *strstr(const char *cs, const char *ct); |
/* |
* This looks horribly ugly, but the compiler can optimize it totally, |
* as we by now know that both pattern and count is constant.. |
*/ |
/*
 * memset where both the replicated pattern and the count are
 * compile-time constants.  Counts 0-4 collapse to direct stores; for
 * anything larger the count % 4 switch is folded at compile time to a
 * single rep-stosl-plus-tail asm chosen via the local COMMON() macro.
 * Returns `s`.
 */
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
size_t count)
{
switch (count) {
case 0:
return s;
case 1:
*(unsigned char *)s = pattern & 0xff;
return s;
case 2:
*(unsigned short *)s = pattern & 0xffff;
return s;
case 3:
*(unsigned short *)s = pattern & 0xffff;
*((unsigned char *)s + 2) = pattern & 0xff;
return s;
case 4:
*(unsigned long *)s = pattern;
return s;
}
/* COMMON(x): rep stosl for count/4 dwords, then the extra tail
 * instructions supplied in x (stosb/stosw as needed). */
#define COMMON(x) \
asm volatile("rep ; stosl" \
x \
: "=&c" (d0), "=&D" (d1) \
: "a" (eax), "0" (count/4), "1" ((long)s) \
: "memory")
{
int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
/* Workaround for broken gcc 4.0 */
register unsigned long eax asm("%eax") = pattern;
#else
unsigned long eax = pattern;
#endif
switch (count % 4) {
case 0:
COMMON("");
return s;
case 1:
COMMON("\n\tstosb");
return s;
case 2:
COMMON("\n\tstosw");
return s;
default:
COMMON("\n\tstosw\n\tstosb");
return s;
}
}
#undef COMMON
}
#define __constant_c_x_memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_c_and_count_memset((s), (c), (count)) \ |
: __constant_c_memset((s), (c), (count))) |
#define __memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_count_memset((s), (c), (count)) \ |
: __memset_generic((s), (c), (count))) |
#define __HAVE_ARCH_MEMSET |
#if (__GNUC__ >= 4) |
#define memset(s, c, count) __builtin_memset(s, c, count) |
#else |
#define memset(s, c, count) \ |
(__builtin_constant_p(c) \ |
? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
(count)) \ |
: __memset((s), (c), (count))) |
#endif |
/* |
* find the first occurrence of byte 'c', or 1 past the area if none |
*/ |
#define __HAVE_ARCH_MEMSCAN |
extern void *memscan(void *addr, int c, size_t size); |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_STRING_32_H */ |
/drivers/include/linux/asm/swab.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_SWAB_H |
#define _ASM_X86_SWAB_H |
#include <linux/types.h> |
#include <linux/compiler.h> |
/*
 * Byte-swap a 32-bit value.  On i386, uses BSWAP when CONFIG_X86_BSWAP
 * is set; otherwise the three-step xchg/ror sequence that needs only
 * 386-era instructions.  Non-i386 builds use bswapl directly.
 */
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
asm("bswap %0" : "=r" (val) : "0" (val));
# else
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
"rorl $16,%0\n\t" /* swap words */
"xchgb %b0,%h0" /* swap higher bytes */
: "=q" (val)
: "0" (val));
# endif
#else /* __i386__ */
asm("bswapl %0"
: "=r" (val)
: "0" (val));
#endif
return val;
}
#define __arch_swab32 __arch_swab32
/*
 * Byte-swap a 64-bit value.  On i386 the value is viewed through a
 * union as two 32-bit halves: each half is byte-swapped and the halves
 * are exchanged.  64-bit builds use a single bswapq.
 */
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
union {
struct {
__u32 a;
__u32 b;
} s;
__u64 u;
} v;
v.u = val;
# ifdef CONFIG_X86_BSWAP
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
# else
/* no BSWAP on this CPU: swap each half in C, exchange in asm */
v.s.a = __arch_swab32(v.s.a);
v.s.b = __arch_swab32(v.s.b);
asm("xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
# endif
return v.u;
#else /* __i386__ */
asm("bswapq %0"
: "=r" (val)
: "0" (val));
return val;
#endif
}
#define __arch_swab64 __arch_swab64
#endif /* _ASM_X86_SWAB_H */ |
/drivers/include/linux/asm/types.h |
---|
0,0 → 1,22 |
#ifndef _ASM_X86_TYPES_H |
#define _ASM_X86_TYPES_H |
#define dma_addr_t dma_addr_t |
#include <asm-generic/types.h> |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef u64 dma64_addr_t; |
#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G) |
/* DMA addresses come in 32-bit and 64-bit flavours. */ |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_TYPES_H */ |
/drivers/include/linux/asm-generic/atomic-long.h |
---|
0,0 → 1,258 |
#ifndef _ASM_GENERIC_ATOMIC_LONG_H |
#define _ASM_GENERIC_ATOMIC_LONG_H |
/* |
* Copyright (C) 2005 Silicon Graphics, Inc. |
* Christoph Lameter |
* |
* Allows to provide arch independent atomic definitions without the need to |
* edit all arch specific atomic.h files. |
*/ |
#include <asm/types.h> |
/* |
 * Support for atomic_long_t
* |
* Casts for parameters are avoided for existing atomic functions in order to |
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the |
* macros of a platform may have. |
*/ |
#if BITS_PER_LONG == 64 |
typedef atomic64_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) |
/* 64-bit longs: atomic_long_t is backed directly by atomic64_t, so each
 * wrapper is a cast plus a forward to the atomic64 primitive. */
static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic64_read((atomic64_t *)l);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
	atomic64_set((atomic64_t *)l, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic64_inc((atomic64_t *)l);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
	atomic64_dec((atomic64_t *)l);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic64_add(i, (atomic64_t *)l);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
	atomic64_sub(i, (atomic64_t *)l);
}
/* Predicate wrappers: forward to atomic64 and return its truth value. */
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
	return atomic64_sub_and_test(i, (atomic64_t *)l);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
	return atomic64_dec_and_test((atomic64_t *)l);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
	return atomic64_inc_and_test((atomic64_t *)l);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
	return atomic64_add_negative(i, (atomic64_t *)l);
}
/* Value-returning wrappers: the atomic64 result is narrowed to long. */
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
	return (long)atomic64_add_return(i, (atomic64_t *)l);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
	return (long)atomic64_sub_return(i, (atomic64_t *)l);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
	return (long)atomic64_inc_return((atomic64_t *)l);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
	return (long)atomic64_dec_return((atomic64_t *)l);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
	return (long)atomic64_add_unless((atomic64_t *)l, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
	(atomic64_xchg((atomic64_t *)(v), (new)))
#else /* BITS_PER_LONG == 64 */ |
typedef atomic_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) |
/* 32-bit longs: atomic_long_t is backed by plain atomic_t, so each
 * wrapper is a cast plus a forward to the atomic_t primitive. */
static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic_read((atomic_t *)l);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
	atomic_set((atomic_t *)l, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic_inc((atomic_t *)l);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
	atomic_dec((atomic_t *)l);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic_add(i, (atomic_t *)l);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
	atomic_sub(i, (atomic_t *)l);
}
/* Predicate wrappers: forward to atomic_t and return its truth value. */
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
	return atomic_sub_and_test(i, (atomic_t *)l);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
	return atomic_dec_and_test((atomic_t *)l);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
	return atomic_inc_and_test((atomic_t *)l);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
	return atomic_add_negative(i, (atomic_t *)l);
}
/* Value-returning wrappers: the atomic_t result is widened to long. */
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
	return (long)atomic_add_return(i, (atomic_t *)l);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
	return (long)atomic_sub_return(i, (atomic_t *)l);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
	return (long)atomic_inc_return((atomic_t *)l);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
	return (long)atomic_dec_return((atomic_t *)l);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
	return (long)atomic_add_unless((atomic_t *)l, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
	(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
	(atomic_xchg((atomic_t *)(v), (new)))
#endif /* BITS_PER_LONG == 64 */ |
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ |
/drivers/include/linux/asm-generic/bitops/ext2-non-atomic.h |
---|
0,0 → 1,20 |
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#include <asm-generic/bitops/le.h> |
#define ext2_set_bit(nr,addr) \ |
generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_clear_bit(nr,addr) \ |
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_test_bit(nr,addr) \ |
generic_test_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_find_first_zero_bit(addr, size) \ |
generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) |
#define ext2_find_next_zero_bit(addr, size, off) \ |
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) |
#define ext2_find_next_bit(addr, size, off) \ |
generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) |
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ |
/drivers/include/linux/asm-generic/bitops/fls64.h |
---|
0,0 → 1,36 |
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ |
#define _ASM_GENERIC_BITOPS_FLS64_H_ |
#include <asm/types.h> |
/** |
* fls64 - find last set bit in a 64-bit word |
* @x: the word to search |
* |
* This is defined in a similar way as the libc and compiler builtin |
* ffsll, but returns the position of the most significant set bit. |
* |
* fls64(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 64. |
*/ |
#if BITS_PER_LONG == 32 |
static __always_inline int fls64(__u64 x) |
{ |
__u32 h = x >> 32; |
if (h) |
return fls(h) + 32; |
return fls(x); |
} |
#elif BITS_PER_LONG == 64 |
static __always_inline int fls64(__u64 x) |
{ |
if (x == 0) |
return 0; |
return __fls(x) + 1; |
} |
#else |
#error BITS_PER_LONG not 32 or 64 |
#endif |
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ |
/drivers/include/linux/asm-generic/bitops/hweight.h |
---|
0,0 → 1,11 |
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#include <asm/types.h> |
extern unsigned int hweight32(unsigned int w); |
extern unsigned int hweight16(unsigned int w); |
extern unsigned int hweight8(unsigned int w); |
extern unsigned long hweight64(__u64 w); |
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ |
/drivers/include/linux/asm-generic/bitops/le.h |
---|
0,0 → 1,57 |
#ifndef _ASM_GENERIC_BITOPS_LE_H_ |
#define _ASM_GENERIC_BITOPS_LE_H_ |
#include <asm/types.h> |
#include <asm/byteorder.h> |
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
#if defined(__LITTLE_ENDIAN) |
#define generic_test_le_bit(nr, addr) test_bit(nr, addr) |
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) |
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) |
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) |
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) |
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) |
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) |
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) |
#define generic_find_next_le_bit(addr, size, offset) \ |
find_next_bit(addr, size, offset) |
#elif defined(__BIG_ENDIAN) |
#define generic_test_le_bit(nr, addr) \ |
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___set_le_bit(nr, addr) \ |
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___clear_le_bit(nr, addr) \ |
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic_test_and_set_le_bit(nr, addr) \ |
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic_test_and_clear_le_bit(nr, addr) \ |
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___test_and_set_le_bit(nr, addr) \ |
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___test_and_clear_le_bit(nr, addr) \ |
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
extern unsigned long generic_find_next_le_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
#else |
#error "Please fix <asm/byteorder.h>" |
#endif |
#define generic_find_first_zero_le_bit(addr, size) \ |
generic_find_next_zero_le_bit((addr), (size), 0) |
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ |
/drivers/include/linux/asm-generic/bitops/minix.h |
---|
0,0 → 1,15 |
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_ |
#define _ASM_GENERIC_BITOPS_MINIX_H_ |
#define minix_test_and_set_bit(nr,addr) \ |
__test_and_set_bit((nr),(unsigned long *)(addr)) |
#define minix_set_bit(nr,addr) \ |
__set_bit((nr),(unsigned long *)(addr)) |
#define minix_test_and_clear_bit(nr,addr) \ |
__test_and_clear_bit((nr),(unsigned long *)(addr)) |
#define minix_test_bit(nr,addr) \ |
test_bit((nr),(unsigned long *)(addr)) |
#define minix_find_first_zero_bit(addr,size) \ |
find_first_zero_bit((unsigned long *)(addr),(size)) |
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ |
/drivers/include/linux/asm-generic/bitops/sched.h |
---|
0,0 → 1,31 |
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ |
#define _ASM_GENERIC_BITOPS_SCHED_H_ |
#include <linux/compiler.h> /* unlikely() */ |
#include <asm/types.h> |
/* |
* Every architecture must define this function. It's the fastest |
* way of searching a 100-bit bitmap. It's guaranteed that at least |
* one of the 100 bits is cleared. |
*/ |
static inline int sched_find_first_bit(const unsigned long *b) |
{ |
#if BITS_PER_LONG == 64 |
if (b[0]) |
return __ffs(b[0]); |
return __ffs(b[1]) + 64; |
#elif BITS_PER_LONG == 32 |
if (b[0]) |
return __ffs(b[0]); |
if (b[1]) |
return __ffs(b[1]) + 32; |
if (b[2]) |
return __ffs(b[2]) + 64; |
return __ffs(b[3]) + 96; |
#else |
#error BITS_PER_LONG not defined |
#endif |
} |
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ |
/drivers/include/linux/asm-generic/bitsperlong.h |
---|
0,0 → 1,32 |
#ifndef __ASM_GENERIC_BITS_PER_LONG |
#define __ASM_GENERIC_BITS_PER_LONG |
/* |
* There seems to be no way of detecting this automatically from user |
* space, so 64 bit architectures should override this in their |
* bitsperlong.h. In particular, an architecture that supports |
* both 32 and 64 bit user space must not rely on CONFIG_64BIT |
* to decide it, but rather check a compiler provided macro. |
*/ |
#ifndef __BITS_PER_LONG |
#define __BITS_PER_LONG 32 |
#endif |
#ifdef __KERNEL__ |
#ifdef CONFIG_64BIT |
#define BITS_PER_LONG 64 |
#else |
#define BITS_PER_LONG 32 |
#endif /* CONFIG_64BIT */ |
/* |
* FIXME: The check currently breaks x86-64 build, so it's |
* temporarily disabled. Please fix x86-64 and reenable |
*/ |
#if 0 && BITS_PER_LONG != __BITS_PER_LONG |
#error Inconsistent word size. Check asm/bitsperlong.h |
#endif |
#endif /* __KERNEL__ */ |
#endif /* __ASM_GENERIC_BITS_PER_LONG */ |
/drivers/include/linux/asm-generic/int-ll64.h |
---|
0,0 → 1,78 |
/* |
* asm-generic/int-ll64.h |
* |
* Integer declarations for architectures which use "long long" |
* for 64-bit types. |
*/ |
#ifndef _ASM_GENERIC_INT_LL64_H |
#define _ASM_GENERIC_INT_LL64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
#ifdef __GNUC__ |
__extension__ typedef __signed__ long long __s64; |
__extension__ typedef unsigned long long __u64; |
#else |
typedef __signed__ long long __s64; |
typedef unsigned long long __u64; |
#endif |
#endif /* __ASSEMBLY__ */ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef signed char s8; |
typedef unsigned char u8; |
typedef signed short s16; |
typedef unsigned short u16; |
typedef signed int s32; |
typedef unsigned int u32; |
typedef signed long long s64; |
typedef unsigned long long u64; |
#define S8_C(x) x |
#define U8_C(x) x ## U |
#define S16_C(x) x |
#define U16_C(x) x ## U |
#define S32_C(x) x |
#define U32_C(x) x ## U |
#define S64_C(x) x ## LL |
#define U64_C(x) x ## ULL |
#else /* __ASSEMBLY__ */ |
#define S8_C(x) x |
#define U8_C(x) x |
#define S16_C(x) x |
#define U16_C(x) x |
#define S32_C(x) x |
#define U32_C(x) x |
#define S64_C(x) x |
#define U64_C(x) x |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_INT_LL64_H */ |
/drivers/include/linux/asm-generic/types.h |
---|
0,0 → 1,42 |
#ifndef _ASM_GENERIC_TYPES_H |
#define _ASM_GENERIC_TYPES_H |
/* |
* int-ll64 is used practically everywhere now, |
* so use it as a reasonable default. |
*/ |
#include <asm-generic/int-ll64.h> |
#ifndef __ASSEMBLY__ |
typedef unsigned short umode_t; |
#endif /* __ASSEMBLY__ */ |
/* |
* These aren't exported outside the kernel to avoid name space clashes |
*/ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
/* |
* DMA addresses may be very different from physical addresses |
* and pointers. i386 and powerpc may have 64 bit DMA on 32 bit |
* systems, while sparc64 uses 32 bit DMA addresses for 64 bit |
* physical addresses. |
* This default defines dma_addr_t to have the same size as |
* phys_addr_t, which is the most common way. |
* Do not define the dma64_addr_t type, which never really |
* worked. |
*/ |
#ifndef dma_addr_t |
#ifdef CONFIG_PHYS_ADDR_T_64BIT |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif /* CONFIG_PHYS_ADDR_T_64BIT */ |
#endif /* dma_addr_t */ |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_TYPES_H */ |
/drivers/include/linux/bitmap.h |
---|
0,0 → 1,293 |
#ifndef __LINUX_BITMAP_H |
#define __LINUX_BITMAP_H |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
#include <linux/bitops.h> |
#include <linux/string.h> |
#include <linux/kernel.h> |
/* |
* bitmaps provide bit arrays that consume one or more unsigned |
* longs. The bitmap interface and available operations are listed |
* here, in bitmap.h |
* |
* Function implementations generic to all architectures are in |
* lib/bitmap.c. Functions implementations that are architecture |
* specific are in various include/asm-<arch>/bitops.h headers |
* and other arch/<arch> specific files. |
* |
* See lib/bitmap.c for more details. |
*/ |
/* |
* The available bitmap operations and their rough meaning in the |
* case that the bitmap is a single unsigned long are thus: |
* |
* Note that nbits should be always a compile time evaluable constant. |
* Otherwise many inlines will generate horrible code. |
* |
* bitmap_zero(dst, nbits) *dst = 0UL |
* bitmap_fill(dst, nbits) *dst = ~0UL |
* bitmap_copy(dst, src, nbits) *dst = *src |
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 |
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 |
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 |
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) |
* bitmap_complement(dst, src, nbits) *dst = ~(*src) |
* bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? |
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? |
* bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? |
* bitmap_empty(src, nbits) Are all bits zero in *src? |
* bitmap_full(src, nbits) Are all bits set in *src? |
* bitmap_weight(src, nbits) Hamming Weight: number set bits |
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) |
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap |
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz |
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf |
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf |
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf |
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf |
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list |
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
* bitmap_release_region(bitmap, pos, order) Free specified bit region |
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
*/ |
/* |
* Also the following operations in asm/bitops.h apply to bitmaps. |
* |
* set_bit(bit, addr) *addr |= bit |
* clear_bit(bit, addr) *addr &= ~bit |
* change_bit(bit, addr) *addr ^= bit |
* test_bit(bit, addr) Is bit set in *addr? |
* test_and_set_bit(bit, addr) Set bit and return old value |
* test_and_clear_bit(bit, addr) Clear bit and return old value |
* test_and_change_bit(bit, addr) Change bit and return old value |
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr |
* find_first_bit(addr, nbits) Position first set bit in *addr |
* find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit |
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit |
*/ |
/* |
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used |
* to declare an array named 'name' of just enough unsigned longs to |
* contain all bit positions from 0 to 'bits' - 1. |
*/ |
/* |
* lib/bitmap.c provides these functions: |
*/ |
extern int __bitmap_empty(const unsigned long *bitmap, int bits); |
extern int __bitmap_full(const unsigned long *bitmap, int bits); |
extern int __bitmap_equal(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, |
int bits); |
extern void __bitmap_shift_right(unsigned long *dst, |
const unsigned long *src, int shift, int bits); |
extern void __bitmap_shift_left(unsigned long *dst, |
const unsigned long *src, int shift, int bits); |
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_intersects(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_subset(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_weight(const unsigned long *bitmap, int bits); |
extern int bitmap_scnprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, |
unsigned long *dst, int nbits); |
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, |
unsigned long *dst, int nbits); |
extern int bitmap_scnlistprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int bitmap_parselist(const char *buf, unsigned long *maskp, |
int nmaskbits); |
extern void bitmap_remap(unsigned long *dst, const unsigned long *src, |
const unsigned long *old, const unsigned long *new, int bits); |
extern int bitmap_bitremap(int oldbit, |
const unsigned long *old, const unsigned long *new, int bits); |
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, |
const unsigned long *relmap, int bits); |
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, |
int sz, int bits); |
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); |
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); |
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); |
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
#define BITMAP_LAST_WORD_MASK(nbits) \ |
( \ |
((nbits) % BITS_PER_LONG) ? \ |
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ |
) |
#define small_const_nbits(nbits) \ |
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) |
/* Clear every bit of a bitmap of @nbits bits. */
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
	if (small_const_nbits(nbits)) {
		*dst = 0UL;
		return;
	}
	memset(dst, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
/* Set every bit of a bitmap of @nbits bits; the final word is masked so
 * bits beyond @nbits stay clear. */
static inline void bitmap_fill(unsigned long *dst, int nbits)
{
	size_t nlongs = BITS_TO_LONGS(nbits);

	if (!small_const_nbits(nbits))
		memset(dst, 0xff, (nlongs - 1) * sizeof(unsigned long));
	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
/* Copy @nbits bits from @src to @dst. */
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
			int nbits)
{
	if (small_const_nbits(nbits)) {
		*dst = *src;
		return;
	}
	memcpy(dst, src, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
/* *dst = *src1 & *src2; returns non-zero when the result is non-empty. */
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_and(dst, src1, src2, nbits);
	return (*dst = *src1 & *src2) != 0;
}
/* *dst = *src1 | *src2 */
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		__bitmap_or(dst, src1, src2, nbits);
	else
		*dst = *src1 | *src2;
}
/* *dst = *src1 ^ *src2 */
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		__bitmap_xor(dst, src1, src2, nbits);
	else
		*dst = *src1 ^ *src2;
}
/* *dst = *src1 & ~*src2; returns non-zero when the result is non-empty. */
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_andnot(dst, src1, src2, nbits);
	return (*dst = *src1 & ~(*src2)) != 0;
}
/* *dst = ~*src, with bits beyond @nbits kept clear in the single-word case. */
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
			int nbits)
{
	if (!small_const_nbits(nbits))
		__bitmap_complement(dst, src, nbits);
	else
		*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
}
/* Non-zero when the first @nbits bits of the two bitmaps agree. */
static inline int bitmap_equal(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_equal(src1, src2, nbits);
	return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
}
/* Non-zero when the two bitmaps share at least one set bit. */
static inline int bitmap_intersects(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_intersects(src1, src2, nbits);
	return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
}
/* Non-zero when every set bit of *src1 is also set in *src2. */
static inline int bitmap_subset(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_subset(src1, src2, nbits);
	return !((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
}
/* Non-zero when none of the first @nbits bits is set. */
static inline int bitmap_empty(const unsigned long *src, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_empty(src, nbits);
	return !(*src & BITMAP_LAST_WORD_MASK(nbits));
}
/* Non-zero when all of the first @nbits bits are set. */
static inline int bitmap_full(const unsigned long *src, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_full(src, nbits);
	return !(~(*src) & BITMAP_LAST_WORD_MASK(nbits));
}
/* Hamming weight: the number of set bits among the first @nbits. */
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_weight(src, nbits);
	return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
}
/* *dst = *src >> n */
static inline void bitmap_shift_right(unsigned long *dst,
			const unsigned long *src, int n, int nbits)
{
	if (!small_const_nbits(nbits))
		__bitmap_shift_right(dst, src, n, nbits);
	else
		*dst = *src >> n;
}
/* *dst = *src << n, masking off bits shifted beyond @nbits. */
static inline void bitmap_shift_left(unsigned long *dst,
			const unsigned long *src, int n, int nbits)
{
	if (!small_const_nbits(nbits))
		__bitmap_shift_left(dst, src, n, nbits);
	else
		*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
}
/* Parse a kernel-space buffer into a bitmap; the 0 argument selects the
 * non-user-space path of __bitmap_parse(). */
static inline int bitmap_parse(const char *buf, unsigned int buflen,
			unsigned long *maskp, int nmaskbits)
{
	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
#endif /* __ASSEMBLY__ */ |
#endif /* __LINUX_BITMAP_H */ |
/drivers/include/linux/bitops.h |
---|
0,0 → 1,191 |
#ifndef _LINUX_BITOPS_H |
#define _LINUX_BITOPS_H |
#define BIT(nr) (1UL << (nr)) |
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BITS_PER_BYTE 8 |
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
/* |
* Include this here because some architectures need generic_ffs/fls in |
* scope |
*/ |
#include <asm/bitops.h> |
#define for_each_bit(bit, addr, size) \ |
for ((bit) = find_first_bit((addr), (size)); \ |
(bit) < (size); \ |
(bit) = find_next_bit((addr), (size), (bit) + 1)) |
/* Smallest number of bits needed to represent @count as a mask.
 * We could be slightly more clever with -1 here... */
static __inline__ int get_bitmask_order(unsigned int count)
{
	return fls(count);
}
/* Order (log2 rounded up) of @count: fls(count)-1, bumped by one when
 * @count is not a power of two. */
static __inline__ int get_count_order(unsigned int count)
{
	int order = fls(count) - 1;

	if (count & (count - 1))
		order++;
	return order;
}
/* Population count of an unsigned long, picking the 32- or 64-bit
 * helper by the compile-time size of the type. */
static inline unsigned long hweight_long(unsigned long w)
{
	if (sizeof(w) == 4)
		return hweight32(w);
	return hweight64(w);
}
/** |
* rol32 - rotate a 32-bit value left |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u32 rol32(__u32 word, unsigned int shift) |
{ |
return (word << shift) | (word >> (32 - shift)); |
} |
/** |
* ror32 - rotate a 32-bit value right |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u32 ror32(__u32 word, unsigned int shift) |
{ |
return (word >> shift) | (word << (32 - shift)); |
} |
/** |
* rol16 - rotate a 16-bit value left |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u16 rol16(__u16 word, unsigned int shift) |
{ |
return (word << shift) | (word >> (16 - shift)); |
} |
/** |
* ror16 - rotate a 16-bit value right |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u16 ror16(__u16 word, unsigned int shift) |
{ |
return (word >> shift) | (word << (16 - shift)); |
} |
/** |
* rol8 - rotate an 8-bit value left |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u8 rol8(__u8 word, unsigned int shift) |
{ |
return (word << shift) | (word >> (8 - shift)); |
} |
/** |
* ror8 - rotate an 8-bit value right |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u8 ror8(__u8 word, unsigned int shift) |
{ |
return (word >> shift) | (word << (8 - shift)); |
} |
/* Find-last-set for unsigned long: dispatch on the type's size, which
 * the compiler resolves at build time. */
static inline unsigned fls_long(unsigned long l)
{
	return sizeof(l) == 4 ? fls(l) : fls64(l);
}
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* Low 32 bits empty: the first set bit is in the high word. */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	/* 64-bit, or 32-bit with a set bit in the low word. */
	return __ffs((unsigned long)word);
}
#ifdef __KERNEL__ |
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
/** |
* find_first_bit - find the first set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first set bit. |
*/ |
extern unsigned long find_first_bit(const unsigned long *addr, |
unsigned long size); |
/** |
* find_first_zero_bit - find the first cleared bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first cleared bit. |
*/ |
extern unsigned long find_first_zero_bit(const unsigned long *addr, |
unsigned long size); |
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
#ifdef CONFIG_GENERIC_FIND_LAST_BIT |
/** |
* find_last_bit - find the last set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first set bit, or size. |
*/ |
extern unsigned long find_last_bit(const unsigned long *addr, |
unsigned long size); |
#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ |
#ifdef CONFIG_GENERIC_FIND_NEXT_BIT |
/** |
* find_next_bit - find the next set bit in a memory region |
* @addr: The address to base the search on |
* @offset: The bitnumber to start searching at |
* @size: The bitmap size in bits |
*/ |
extern unsigned long find_next_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
/** |
* find_next_zero_bit - find the next cleared bit in a memory region |
* @addr: The address to base the search on |
* @offset: The bitnumber to start searching at |
* @size: The bitmap size in bits |
*/ |
extern unsigned long find_next_zero_bit(const unsigned long *addr, |
unsigned long size, |
unsigned long offset); |
#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ |
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/byteorder/generic.h |
---|
0,0 → 1,173 |
#ifndef _LINUX_BYTEORDER_GENERIC_H |
#define _LINUX_BYTEORDER_GENERIC_H |
/* |
* linux/byteorder_generic.h |
* Generic Byte-reordering support |
* |
* The "... p" macros, like le64_to_cpup, can be used with pointers |
* to unaligned data, but there will be a performance penalty on |
* some architectures. Use get_unaligned for unaligned data. |
* |
* Francois-Rene Rideau <fare@tunes.org> 19970707 |
* gathered all the good ideas from all asm-foo/byteorder.h into one file, |
* cleaned them up. |
* I hope it is compliant with non-GCC compilers. |
* I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, |
* because I wasn't sure it would be ok to put it in types.h |
* Upgraded it to 2.1.43 |
* Francois-Rene Rideau <fare@tunes.org> 19971012 |
* Upgraded it to 2.1.57 |
* to please Linus T., replaced huge #ifdef's between little/big endian |
* by nestedly #include'd files. |
* Francois-Rene Rideau <fare@tunes.org> 19971205 |
* Made it to 2.1.71; now a facelift: |
* Put files under include/linux/byteorder/ |
* Split swab from generic support. |
* |
* TODO: |
* = Regular kernel maintainers could also replace all these manual |
* byteswap macros that remain, disseminated among drivers, |
* after some grep or the sources... |
* = Linus might want to rename all these macros and files to fit his taste, |
* to fit his personal naming scheme. |
* = it seems that a few drivers would also appreciate |
* nybble swapping support... |
* = every architecture could add their byteswap macro in asm/byteorder.h |
* see how some architectures already do (i386, alpha, ppc, etc) |
* = cpu_to_beXX and beXX_to_cpu might some day need to be well |
* distinguished throughout the kernel. This is not the case currently, |
* since little endian, big endian, and pdp endian machines needn't it. |
* But this might be the case for, say, a port of Linux to 20/21 bit |
* architectures (and F21 Linux addict around?). |
*/ |
/* |
* The following macros are to be defined by <asm/byteorder.h>: |
* |
* Conversion of long and short int between network and host format |
* ntohl(__u32 x) |
* ntohs(__u16 x) |
* htonl(__u32 x) |
* htons(__u16 x) |
* It seems that some programs (which? where? or perhaps a standard? POSIX?) |
* might like the above to be functions, not macros (why?). |
* if that's true, then detect them, and take measures. |
* Anyway, the measure is: define only ___ntohl as a macro instead, |
* and in a separate file, have |
* unsigned long inline ntohl(x){return ___ntohl(x);} |
* |
* The same for constant arguments |
* __constant_ntohl(__u32 x) |
* __constant_ntohs(__u16 x) |
* __constant_htonl(__u32 x) |
* __constant_htons(__u16 x) |
* |
* Conversion of XX-bit integers (16- 32- or 64-) |
* between native CPU format and little/big endian format |
* 64-bit stuff only defined for proper architectures |
* cpu_to_[bl]eXX(__uXX x) |
* [bl]eXX_to_cpu(__uXX x) |
* |
* The same, but takes a pointer to the value to convert |
* cpu_to_[bl]eXXp(__uXX x) |
* [bl]eXX_to_cpup(__uXX x) |
* |
* The same, but change in situ |
* cpu_to_[bl]eXXs(__uXX x) |
* [bl]eXX_to_cpus(__uXX x) |
* |
* See asm-foo/byteorder.h for examples of how to provide |
* architecture-optimized versions |
* |
*/ |
/*
 * Public names for the byte-order helpers, aliased to the
 * double-underscore implementations pulled in from <asm/byteorder.h>.
 * Three families per direction and width:
 *   cpu_to_[bl]eXX(x)  / [bl]eXX_to_cpu(x)  - convert a value
 *   cpu_to_[bl]eXXp(p) / [bl]eXX_to_cpup(p) - convert through a pointer
 *   cpu_to_[bl]eXXs(p) / [bl]eXX_to_cpus(p) - convert in place
 */
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
/* Pointer ("p") variants. */
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
/* In-place ("s") variants. */
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
/*
 * The network-order helpers have to be macros in order to do the
 * constant folding correctly - if the argument is passed into an
 * inline function it is no longer constant according to gcc..
 */
#undef ntohl
#undef ntohs
#undef htonl
#undef htons
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
/* Network byte order is big-endian, hence the be32/be16 forms. */
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
/* Add @val (host order) to the little-endian value at @var, in place. */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
	u16 host = le16_to_cpu(*var);

	host += val;
	*var = cpu_to_le16(host);
}
/* Add @val (host order) to the little-endian value at @var, in place. */
static inline void le32_add_cpu(__le32 *var, u32 val)
{
	u32 host = le32_to_cpu(*var);

	host += val;
	*var = cpu_to_le32(host);
}
/* Add @val (host order) to the little-endian value at @var, in place. */
static inline void le64_add_cpu(__le64 *var, u64 val)
{
	u64 host = le64_to_cpu(*var);

	host += val;
	*var = cpu_to_le64(host);
}
/* Add @val (host order) to the big-endian value at @var, in place. */
static inline void be16_add_cpu(__be16 *var, u16 val)
{
	u16 host = be16_to_cpu(*var);

	host += val;
	*var = cpu_to_be16(host);
}
/* Add @val (host order) to the big-endian value at @var, in place. */
static inline void be32_add_cpu(__be32 *var, u32 val)
{
	u32 host = be32_to_cpu(*var);

	host += val;
	*var = cpu_to_be32(host);
}
/* Add @val (host order) to the big-endian value at @var, in place. */
static inline void be64_add_cpu(__be64 *var, u64 val)
{
	u64 host = be64_to_cpu(*var);

	host += val;
	*var = cpu_to_be64(host);
}
#endif /* _LINUX_BYTEORDER_GENERIC_H */ |
/drivers/include/linux/byteorder/little_endian.h |
---|
0,0 → 1,108 |
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#ifndef __LITTLE_ENDIAN |
#define __LITTLE_ENDIAN 1234 |
#endif |
#ifndef __LITTLE_ENDIAN_BITFIELD |
#define __LITTLE_ENDIAN_BITFIELD |
#endif |
#include <linux/types.h> |
#include <linux/swab.h> |
#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) |
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) |
#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) |
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) |
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) |
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) |
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) |
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) |
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) |
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) |
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) |
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) |
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) |
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) |
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) |
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) |
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) |
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) |
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) |
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) |
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) |
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) |
#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) |
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) |
#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) |
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) |
#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) |
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) |
static inline __le64 __cpu_to_le64p(const __u64 *p) |
{ |
return (__force __le64)*p; |
} |
static inline __u64 __le64_to_cpup(const __le64 *p) |
{ |
return (__force __u64)*p; |
} |
static inline __le32 __cpu_to_le32p(const __u32 *p) |
{ |
return (__force __le32)*p; |
} |
static inline __u32 __le32_to_cpup(const __le32 *p) |
{ |
return (__force __u32)*p; |
} |
static inline __le16 __cpu_to_le16p(const __u16 *p) |
{ |
return (__force __le16)*p; |
} |
static inline __u16 __le16_to_cpup(const __le16 *p) |
{ |
return (__force __u16)*p; |
} |
static inline __be64 __cpu_to_be64p(const __u64 *p) |
{ |
return (__force __be64)__swab64p(p); |
} |
static inline __u64 __be64_to_cpup(const __be64 *p) |
{ |
return __swab64p((__u64 *)p); |
} |
static inline __be32 __cpu_to_be32p(const __u32 *p) |
{ |
return (__force __be32)__swab32p(p); |
} |
static inline __u32 __be32_to_cpup(const __be32 *p) |
{ |
return __swab32p((__u32 *)p); |
} |
static inline __be16 __cpu_to_be16p(const __u16 *p) |
{ |
return (__force __be16)__swab16p(p); |
} |
static inline __u16 __be16_to_cpup(const __be16 *p) |
{ |
return __swab16p((__u16 *)p); |
} |
#define __cpu_to_le64s(x) do { (void)(x); } while (0) |
#define __le64_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_le32s(x) do { (void)(x); } while (0) |
#define __le32_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_le16s(x) do { (void)(x); } while (0) |
#define __le16_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_be64s(x) __swab64s((x)) |
#define __be64_to_cpus(x) __swab64s((x)) |
#define __cpu_to_be32s(x) __swab32s((x)) |
#define __be32_to_cpus(x) __swab32s((x)) |
#define __cpu_to_be16s(x) __swab16s((x)) |
#define __be16_to_cpus(x) __swab16s((x)) |
#ifdef __KERNEL__ |
#include <linux/byteorder/generic.h> |
#endif |
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ |
/drivers/include/linux/compiler-gcc.h |
---|
0,0 → 1,87 |
#ifndef __LINUX_COMPILER_H |
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." |
#endif |
/* |
* Common definitions for all gcc versions go here. |
*/ |
/* Optimization barrier */ |
/* The "volatile" is due to gcc bugs */ |
#define barrier() __asm__ __volatile__("": : :"memory") |
/* |
* This macro obfuscates arithmetic on a variable address so that gcc |
* shouldn't recognize the original var, and make assumptions about it. |
* |
* This is needed because the C standard makes it undefined to do |
* pointer arithmetic on "objects" outside their boundaries and the |
* gcc optimizers assume this is the case. In particular they |
* assume such arithmetic does not wrap. |
* |
* A miscompilation has been observed because of this on PPC. |
* To work around it we hide the relationship of the pointer and the object |
* using this macro. |
* |
* Versions of the ppc64 compiler before 4.1 had a bug where use of |
* RELOC_HIDE could trash r30. The bug can be worked around by changing |
* the inline assembly constraint from =g to =r, in this particular |
* case either is valid. |
*/ |
#define RELOC_HIDE(ptr, off) \ |
({ unsigned long __ptr; \ |
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ |
(typeof(ptr)) (__ptr + (off)); }) |
/* &a[0] degrades to a pointer: a different type from an array */ |
#define __must_be_array(a) \ |
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) |
/* |
* Force always-inline if the user requests it so via the .config, |
* or if gcc is too old: |
*/ |
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
# define inline inline __attribute__((always_inline)) |
# define __inline__ __inline__ __attribute__((always_inline)) |
# define __inline __inline __attribute__((always_inline)) |
#endif |
#define __deprecated __attribute__((deprecated)) |
#define __packed __attribute__((packed)) |
#define __weak __attribute__((weak)) |
/* |
* it doesn't make sense on ARM (currently the only user of __naked) to trace |
* naked functions because then mcount is called without stack and frame pointer |
* being set up and there is no chance to restore the lr register to the value |
* before mcount was called. |
*/ |
#define __naked __attribute__((naked)) notrace |
#define __noreturn __attribute__((noreturn)) |
/* |
* From the GCC manual: |
* |
* Many functions have no effects except the return value and their |
* return value depends only on the parameters and/or global |
* variables. Such a function can be subject to common subexpression |
* elimination and loop optimization just as an arithmetic operator |
* would be. |
* [...] |
*/ |
#define __pure __attribute__((pure)) |
#define __aligned(x) __attribute__((aligned(x))) |
#define __printf(a,b) __attribute__((format(printf,a,b))) |
#define noinline __attribute__((noinline)) |
#define __attribute_const__ __attribute__((__const__)) |
#define __maybe_unused __attribute__((unused)) |
#define __always_unused __attribute__((unused)) |
#define __gcc_header(x) #x |
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) |
#define gcc_header(x) _gcc_header(x) |
#include gcc_header(__GNUC__) |
/drivers/include/linux/compiler-gcc4.h |
---|
0,0 → 1,61 |
#ifndef __LINUX_COMPILER_H |
#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." |
#endif |
/* GCC 4.1.[01] miscompiles __weak */ |
#ifdef __KERNEL__ |
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 |
# error Your version of gcc miscompiles the __weak directive |
# endif |
#endif |
#define __used __attribute__((__used__)) |
#define __must_check __attribute__((warn_unused_result)) |
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) |
#define __always_inline inline __attribute__((always_inline)) |
/* |
* A trick to suppress uninitialized variable warning without generating any |
* code |
*/ |
#define uninitialized_var(x) x = x |
#if __GNUC_MINOR__ >= 3 |
/* Mark functions as cold. gcc will assume any path leading to a call |
to them will be unlikely. This means a lot of manual unlikely()s |
are unnecessary now for any paths leading to the usual suspects |
like BUG(), printk(), panic() etc. [but let's keep them for now for |
older compilers] |
Early snapshots of gcc 4.3 don't support this and we can't detect this |
in the preprocessor, but we can live with this because they're unreleased. |
Maketime probing would be overkill here. |
gcc also has a __attribute__((__hot__)) to move hot functions into |
a special section, but I don't see any sense in this right now in |
the kernel context */ |
#define __cold __attribute__((__cold__)) |
#if __GNUC_MINOR__ >= 5 |
/* |
* Mark a position in code as unreachable. This can be used to |
* suppress control flow warnings after asm blocks that transfer |
* control elsewhere. |
* |
* Early snapshots of gcc 4.5 don't support this and we can't detect |
* this in the preprocessor, but we can live with this because they're |
* unreleased. Really, we need to have autoconf for the kernel. |
*/ |
#define unreachable() __builtin_unreachable() |
#endif |
#endif |
#if __GNUC_MINOR__ > 0 |
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
#endif |
#if __GNUC_MINOR__ >= 4 |
#define __compiletime_warning(message) __attribute__((warning(message))) |
#define __compiletime_error(message) __attribute__((error(message))) |
#endif |
/drivers/include/linux/compiler.h |
---|
0,0 → 1,303 |
#ifndef __LINUX_COMPILER_H |
#define __LINUX_COMPILER_H |
#ifndef __ASSEMBLY__ |
#ifdef __CHECKER__ |
# define __user __attribute__((noderef, address_space(1))) |
# define __kernel /* default address space */ |
# define __safe __attribute__((safe)) |
# define __force __attribute__((force)) |
# define __nocast __attribute__((nocast)) |
# define __iomem __attribute__((noderef, address_space(2))) |
# define __acquires(x) __attribute__((context(x,0,1))) |
# define __releases(x) __attribute__((context(x,1,0))) |
# define __acquire(x) __context__(x,1) |
# define __release(x) __context__(x,-1) |
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) |
extern void __chk_user_ptr(const volatile void __user *); |
extern void __chk_io_ptr(const volatile void __iomem *); |
#else |
# define __user |
# define __kernel |
# define __safe |
# define __force |
# define __nocast |
# define __iomem |
# define __chk_user_ptr(x) (void)0 |
# define __chk_io_ptr(x) (void)0 |
# define __builtin_warning(x, y...) (1) |
# define __acquires(x) |
# define __releases(x) |
# define __acquire(x) (void)0 |
# define __release(x) (void)0 |
# define __cond_lock(x,c) (c) |
#endif |
#ifdef __KERNEL__ |
#ifdef __GNUC__ |
#include <linux/compiler-gcc.h> |
#endif |
#define notrace __attribute__((no_instrument_function)) |
/* Intel compiler defines __GNUC__. So we will overwrite implementations |
* coming from above header files here |
*/ |
#ifdef __INTEL_COMPILER |
# include <linux/compiler-intel.h> |
#endif |
/* |
* Generic compiler-dependent macros required for kernel |
* build go below this comment. Actual compiler/compiler version |
* specific implementations come from the above header files |
*/ |
/*
 * One record per instrumented branch site.  The branch-profiling
 * variants of likely()/unlikely() below allocate one of these in a
 * dedicated ELF section ("_ftrace_annotated_branch" / "_ftrace_branch")
 * and update its counters at run time.
 */
struct ftrace_branch_data {
	const char *func;	/* __func__ at the annotation site */
	const char *file;	/* __FILE__ at the annotation site */
	unsigned line;		/* __LINE__ at the annotation site */
	union {
		struct {
			/* presumably maintained by ftrace_likely_update()
			 * for annotated branches - confirm in its impl. */
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		/* same storage as miss/hit, indexed by !!cond
		 * (see __trace_if below) */
		unsigned long miss_hit[2];
	};
};
/* |
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code |
* to disable branch tracing on a per file basis. |
*/ |
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ |
&& !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) |
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); |
#define likely_notrace(x) __builtin_expect(!!(x), 1) |
#define unlikely_notrace(x) __builtin_expect(!!(x), 0) |
#define __branch_check__(x, expect) ({ \ |
int ______r; \ |
static struct ftrace_branch_data \ |
__attribute__((__aligned__(4))) \ |
__attribute__((section("_ftrace_annotated_branch"))) \ |
______f = { \ |
.func = __func__, \ |
.file = __FILE__, \ |
.line = __LINE__, \ |
}; \ |
______r = likely_notrace(x); \ |
ftrace_likely_update(&______f, ______r, expect); \ |
______r; \ |
}) |
/* |
* Using __builtin_constant_p(x) to ignore cases where the return |
* value is always the same. This idea is taken from a similar patch |
* written by Daniel Walker. |
*/ |
# ifndef likely |
# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) |
# endif |
# ifndef unlikely |
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) |
# endif |
#ifdef CONFIG_PROFILE_ALL_BRANCHES |
/* |
* "Define 'is'", Bill Clinton |
* "Define 'if'", Steven Rostedt |
*/ |
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) |
#define __trace_if(cond) \ |
if (__builtin_constant_p((cond)) ? !!(cond) : \ |
({ \ |
int ______r; \ |
static struct ftrace_branch_data \ |
__attribute__((__aligned__(4))) \ |
__attribute__((section("_ftrace_branch"))) \ |
______f = { \ |
.func = __func__, \ |
.file = __FILE__, \ |
.line = __LINE__, \ |
}; \ |
______r = !!(cond); \ |
______f.miss_hit[______r]++; \ |
______r; \ |
})) |
#endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
#else |
# define likely(x) __builtin_expect(!!(x), 1) |
# define unlikely(x) __builtin_expect(!!(x), 0) |
#endif |
/* Optimization barrier */ |
#ifndef barrier |
# define barrier() __memory_barrier() |
#endif |
/* Unreachable code */ |
#ifndef unreachable |
# define unreachable() do { } while (1) |
#endif |
#ifndef RELOC_HIDE |
# define RELOC_HIDE(ptr, off) \ |
({ unsigned long __ptr; \ |
__ptr = (unsigned long) (ptr); \ |
(typeof(ptr)) (__ptr + (off)); }) |
#endif |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
#ifdef __KERNEL__ |
/* |
* Allow us to mark functions as 'deprecated' and have gcc emit a nice |
* warning for each use, in hopes of speeding the functions removal. |
* Usage is: |
* int __deprecated foo(void) |
*/ |
#ifndef __deprecated |
# define __deprecated /* unimplemented */ |
#endif |
#ifdef MODULE |
#define __deprecated_for_modules __deprecated |
#else |
#define __deprecated_for_modules |
#endif |
#ifndef __must_check |
#define __must_check |
#endif |
#ifndef CONFIG_ENABLE_MUST_CHECK |
#undef __must_check |
#define __must_check |
#endif |
#ifndef CONFIG_ENABLE_WARN_DEPRECATED |
#undef __deprecated |
#undef __deprecated_for_modules |
#define __deprecated |
#define __deprecated_for_modules |
#endif |
/* |
* Allow us to avoid 'defined but not used' warnings on functions and data, |
* as well as force them to be emitted to the assembly file. |
* |
* As of gcc 3.4, static functions that are not marked with attribute((used)) |
* may be elided from the assembly file. As of gcc 3.4, static data not so |
* marked will not be elided, but this may change in a future gcc version. |
* |
* NOTE: Because distributions shipped with a backported unit-at-a-time |
* compiler in gcc 3.3, we must define __used to be __attribute__((used)) |
* for gcc >=3.3 instead of 3.4. |
* |
* In prior versions of gcc, such functions and data would be emitted, but |
* would be warned about except with attribute((unused)). |
* |
* Mark functions that are referenced only in inline assembly as __used so |
* the code is emitted even though it appears to be unreferenced. |
*/ |
#ifndef __used |
# define __used /* unimplemented */ |
#endif |
#ifndef __maybe_unused |
# define __maybe_unused /* unimplemented */ |
#endif |
#ifndef __always_unused |
# define __always_unused /* unimplemented */ |
#endif |
#ifndef noinline |
#define noinline |
#endif |
/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead. For documentation reasons.
 */
#define noinline_for_stack noinline |
#ifndef __always_inline |
#define __always_inline inline |
#endif |
#endif /* __KERNEL__ */ |
/* |
* From the GCC manual: |
* |
* Many functions do not examine any values except their arguments, |
* and have no effects except the return value. Basically this is |
* just slightly more strict class than the `pure' attribute above, |
* since function is not allowed to read global memory. |
* |
* Note that a function that has pointer arguments and examines the |
* data pointed to must _not_ be declared `const'. Likewise, a |
* function that calls a non-`const' function usually must not be |
* `const'. It does not make sense for a `const' function to return |
* `void'. |
*/ |
#ifndef __attribute_const__ |
# define __attribute_const__ /* unimplemented */ |
#endif |
/* |
* Tell gcc if a function is cold. The compiler will assume any path |
* directly leading to the call is unlikely. |
*/ |
#ifndef __cold |
#define __cold |
#endif |
/* Simple shorthand for a section definition */ |
#ifndef __section |
# define __section(S) __attribute__ ((__section__(#S))) |
#endif |
/* Are two types/vars the same type (ignoring qualifiers)? */ |
#ifndef __same_type |
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) |
#endif |
/* Compile time object size, -1 for unknown */ |
#ifndef __compiletime_object_size |
# define __compiletime_object_size(obj) -1 |
#endif |
#ifndef __compiletime_warning |
# define __compiletime_warning(message) |
#endif |
#ifndef __compiletime_error |
# define __compiletime_error(message) |
#endif |
/* |
* Prevent the compiler from merging or refetching accesses. The compiler |
* is also forbidden from reordering successive instances of ACCESS_ONCE(), |
* but only when the compiler is aware of some particular ordering. One way |
* to make the compiler aware of ordering is to put the two invocations of |
* ACCESS_ONCE() in different C statements. |
* |
* This macro does absolutely -nothing- to prevent the CPU from reordering, |
* merging, or refetching absolutely anything at any time. Its main intended |
* use is to mediate communication between process-level code and irq/NMI |
* handlers, all running on the same CPU. |
*/ |
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
#endif /* __LINUX_COMPILER_H */ |
/drivers/include/linux/errno.h |
---|
0,0 → 1,114 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#include <errno-base.h> |
#define ERESTARTSYS 512 |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale NFS file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
#endif |
/drivers/include/linux/fb.h |
---|
0,0 → 1,1055 |
#ifndef _LINUX_FB_H |
#define _LINUX_FB_H |
#include <linux/types.h> |
#include <list.h> |
#include <linux/i2c.h> |
struct dentry; |
/* Definitions of frame buffers */ |
#define FB_MAX 32 /* sufficient for now */ |
#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ |
#define FB_TYPE_PLANES 1 /* Non interleaved planes */ |
#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */ |
#define FB_TYPE_TEXT 3 /* Text/attributes */ |
#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */ |
#define FB_AUX_TEXT_MDA 0 /* Monochrome text */ |
#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */ |
#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */ |
#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */ |
#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */ |
#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits says step */ |
#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */ |
#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */ |
#define FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */ |
#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */ |
#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */ |
#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ |
#define FB_VISUAL_MONO10 1 /* Monochr. 1=White 0=Black */ |
#define FB_VISUAL_TRUECOLOR 2 /* True color */ |
#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ |
#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ |
#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ |
#define FB_ACCEL_NONE 0 /* no hardware accelerator */ |
#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */ |
#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */ |
#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */ |
#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */ |
#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */ |
#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */ |
#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */ |
#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */ |
#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */ |
#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */ |
#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */ |
#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */ |
#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */ |
#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */ |
#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */ |
#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */ |
#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */ |
#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */ |
#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */ |
#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */ |
#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) */ |
#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */ |
#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */ |
#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */ |
#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */ |
#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */ |
#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */ |
#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */ |
#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */ |
#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */ |
#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */ |
#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */ |
#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */ |
#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */ |
#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */ |
#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */ |
#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */ |
#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */ |
#define FB_ACCEL_I810 39 /* Intel 810/815 */ |
#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */ |
#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */ |
#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */ |
#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */ |
#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ |
#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ |
#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ |
#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ |
#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ |
#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ |
#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ |
#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ |
#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ |
#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ |
#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ |
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ |
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ |
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ |
#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */ |
#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */ |
#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */ |
#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ |
#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ |
#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ |
#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ |
#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ |
#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ |
#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */ |
#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */ |
#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */ |
#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */ |
#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */ |
#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */ |
#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */ |
#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */ |
#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */ |
#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */ |
#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */ |
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */ |
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */ |
/* Fixed framebuffer properties reported by the driver: physical memory
 * ranges (frame buffer and MMIO), pixel layout type/visual, panning
 * granularity and the acceleration chip id.  Contrast with
 * fb_var_screeninfo below, which holds the settable mode parameters. */
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
unsigned long smem_start; /* Start of frame buffer mem */
/* (physical address) */
__u32 smem_len; /* Length of frame buffer mem */
__u32 type; /* see FB_TYPE_* */
__u32 type_aux; /* Interleave for interleaved Planes */
__u32 visual; /* see FB_VISUAL_* */
__u16 xpanstep; /* zero if no hardware panning */
__u16 ypanstep; /* zero if no hardware panning */
__u16 ywrapstep; /* zero if no hardware ywrap */
__u32 line_length; /* length of a line in bytes */
unsigned long mmio_start; /* Start of Memory Mapped I/O */
/* (physical address) */
__u32 mmio_len; /* Length of Memory Mapped I/O */
__u32 accel; /* Indicate to driver which */
/* specific chip/card we have */
__u16 reserved[3]; /* Reserved for future compatibility */
};
/* Interpretation of offset for color fields: All offsets are from the right, |
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you |
* can use the offset as right argument to <<). A pixel afterwards is a bit |
* stream and is written to video memory as that unmodified. |
* |
* For pseudocolor: offset and length should be the same for all color |
* components. Offset specifies the position of the least significant bit |
 * of the palette index in a pixel value. Length indicates the number
* of available palette entries (i.e. # of entries = 1 << length). |
*/ |
/* Location of one color component inside a pixel value; see the offset
 * discussion in the comment block directly above. */
struct fb_bitfield {
__u32 offset; /* beginning of bitfield */
__u32 length; /* length of bitfield */
__u32 msb_right; /* != 0 : Most significant bit is */
/* right */
};
#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ |
#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ |
#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ |
#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ |
#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ |
#define FB_ACTIVATE_MASK 15 |
/* values */ |
#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ |
#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ |
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ |
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ |
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ |
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ |
#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ |
#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ |
#define FB_SYNC_EXT 4 /* external sync */ |
#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ |
#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ |
/* vtotal = 144d/288n/576i => PAL */ |
/* vtotal = 121d/242n/484i => NTSC */ |
#define FB_SYNC_ON_GREEN 32 /* sync on green */ |
#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ |
#define FB_VMODE_INTERLACED 1 /* interlaced */ |
#define FB_VMODE_DOUBLE 2 /* double scan */ |
#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ |
#define FB_VMODE_MASK 255 |
#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ |
#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ |
#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ |
/* |
* Display rotation support |
*/ |
#define FB_ROTATE_UR 0 |
#define FB_ROTATE_CW 1 |
#define FB_ROTATE_UD 2 |
#define FB_ROTATE_CCW 3 |
#define PICOS2KHZ(a) (1000000000UL/(a)) |
#define KHZ2PICOS(a) (1000000000UL/(a)) |
/* Variable (user-settable) screen parameters: resolution, virtual
 * desktop size and panning offset, pixel format (bits per pixel plus the
 * per-component fb_bitfield layout) and full video timing.  Changing
 * these is what fb_set_var()/fb_check_var()/fb_set_par() negotiate. */
struct fb_var_screeninfo {
__u32 xres; /* visible resolution */
__u32 yres;
__u32 xres_virtual; /* virtual resolution */
__u32 yres_virtual;
__u32 xoffset; /* offset from virtual to visible */
__u32 yoffset; /* resolution */
__u32 bits_per_pixel; /* guess what */
__u32 grayscale; /* != 0 Graylevels instead of colors */
struct fb_bitfield red; /* bitfield in fb mem if true color, */
struct fb_bitfield green; /* else only length is significant */
struct fb_bitfield blue;
struct fb_bitfield transp; /* transparency */
__u32 nonstd; /* != 0 Non standard pixel format */
__u32 activate; /* see FB_ACTIVATE_* */
__u32 height; /* height of picture in mm */
__u32 width; /* width of picture in mm */
__u32 accel_flags; /* (OBSOLETE) see fb_info.flags */
/* Timing: All values in pixclocks, except pixclock (of course) */
__u32 pixclock; /* pixel clock in ps (pico seconds) */
__u32 left_margin; /* time from sync to picture */
__u32 right_margin; /* time from picture to sync */
__u32 upper_margin; /* time from sync to picture */
__u32 lower_margin;
__u32 hsync_len; /* length of horizontal sync */
__u32 vsync_len; /* length of vertical sync */
__u32 sync; /* see FB_SYNC_* */
__u32 vmode; /* see FB_VMODE_* */
__u32 rotate; /* angle we rotate counter clockwise */
__u32 reserved[5]; /* Reserved for future compatibility */
};
/* Color map: 'len' entries starting at palette index 'start'; each
 * component array holds one 16-bit value per entry. */
struct fb_cmap {
__u32 start; /* First entry */
__u32 len; /* Number of entries */
__u16 *red; /* Red values */
__u16 *green;
__u16 *blue;
__u16 *transp; /* transparency, can be NULL */
};
/* Maps a console number to the framebuffer device driving it. */
struct fb_con2fbmap {
__u32 console;
__u32 framebuffer;
};
/* VESA Blanking Levels */ |
#define VESA_NO_BLANKING 0 |
#define VESA_VSYNC_SUSPEND 1 |
#define VESA_HSYNC_SUSPEND 2 |
#define VESA_POWERDOWN 3 |
/* Display blanking levels.  Every value except UNBLANK is the matching
 * VESA_* level plus one, so 0 always means "fully on" and increasing
 * values blank progressively more of the sync signals. */
enum {
/* screen: unblanked, hsync: on, vsync: on */
FB_BLANK_UNBLANK = VESA_NO_BLANKING,
/* screen: blanked, hsync: on, vsync: on */
FB_BLANK_NORMAL = VESA_NO_BLANKING + 1,
/* screen: blanked, hsync: on, vsync: off */
FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1,
/* screen: blanked, hsync: off, vsync: on */
FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1,
/* screen: blanked, hsync: off, vsync: off */
FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1
};
#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */ |
#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */ |
#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */ |
#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */ |
#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */ |
#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ |
#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ |
#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ |
#define FB_VBLANK_HAVE_VSYNC 0x100 /* vertical syncs can be detected */
/* Snapshot of the display's blanking/retrace state; which fields are
 * valid is indicated by the FB_VBLANK_* bits in 'flags'. */
struct fb_vblank {
__u32 flags; /* FB_VBLANK flags */
__u32 count; /* counter of retraces since boot */
__u32 vcount; /* current scanline position */
__u32 hcount; /* current scandot position */
__u32 reserved[4]; /* reserved for future compatibility */
};
/* Internal HW accel */ |
#define ROP_COPY 0 |
#define ROP_XOR 1 |
/* Arguments for the accelerated screen-to-screen copy (fb_ops.fb_copyarea).
 * All coordinates are in pixels, screen-relative. */
struct fb_copyarea {
__u32 dx; /* destination origin */
__u32 dy;
__u32 width;
__u32 height;
__u32 sx; /* source origin */
__u32 sy;
};
/* Arguments for the accelerated rectangle fill (fb_ops.fb_fillrect). */
struct fb_fillrect {
__u32 dx; /* screen-relative */
__u32 dy;
__u32 width;
__u32 height;
__u32 color;
__u32 rop; /* ROP_COPY or ROP_XOR, see above */
};
/* A bitmap/pixmap to be blitted to the screen (fb_ops.fb_imageblit). */
struct fb_image {
__u32 dx; /* Where to place image */
__u32 dy;
__u32 width; /* Size of image */
__u32 height;
__u32 fg_color; /* Only used when a mono bitmap */
__u32 bg_color;
__u8 depth; /* Depth of the image */
const char *data; /* Pointer to image data */
struct fb_cmap cmap; /* color map info */
};
/* |
* hardware cursor control |
*/ |
#define FB_CUR_SETIMAGE 0x01 |
#define FB_CUR_SETPOS 0x02 |
#define FB_CUR_SETHOT 0x04 |
#define FB_CUR_SETCMAP 0x08 |
#define FB_CUR_SETSHAPE 0x10 |
#define FB_CUR_SETSIZE 0x20 |
#define FB_CUR_SETALL 0xFF |
/* A 2-D position used for the hardware cursor. */
struct fbcurpos {
__u16 x, y;
};
/* Hardware cursor state; 'set' holds FB_CUR_SET* bits naming which of
 * the remaining fields carry new values. */
struct fb_cursor {
__u16 set; /* what to set */
__u16 enable; /* cursor on/off */
__u16 rop; /* bitop operation */
const char *mask; /* cursor mask bits */
struct fbcurpos hot; /* cursor hot spot */
struct fb_image image; /* Cursor image */
};
#ifdef CONFIG_FB_BACKLIGHT |
/* Settings for the generic backlight code */ |
#define FB_BACKLIGHT_LEVELS 128 |
#define FB_BACKLIGHT_MAX 0xFF |
#endif |
//#ifdef __KERNEL__ |
//#include <linux/fs.h> |
//#include <linux/init.h> |
//#include <linux/device.h> |
//#include <linux/workqueue.h> |
//#include <linux/notifier.h> |
#include <linux/list.h> |
//#include <linux/backlight.h> |
//#include <asm/io.h> |
//struct vm_area_struct; |
//struct fb_info; |
//struct device; |
//struct file; |
/* Definitions below are used in the parsed monitor specs */ |
#define FB_DPMS_ACTIVE_OFF 1 |
#define FB_DPMS_SUSPEND 2 |
#define FB_DPMS_STANDBY 4 |
#define FB_DISP_DDI 1 |
#define FB_DISP_ANA_700_300 2 |
#define FB_DISP_ANA_714_286 4 |
#define FB_DISP_ANA_1000_400 8 |
#define FB_DISP_ANA_700_000 16 |
#define FB_DISP_MONO 32 |
#define FB_DISP_RGB 64 |
#define FB_DISP_MULTI 128 |
#define FB_DISP_UNKNOWN 256 |
#define FB_SIGNAL_NONE 0 |
#define FB_SIGNAL_BLANK_BLANK 1 |
#define FB_SIGNAL_SEPARATE 2 |
#define FB_SIGNAL_COMPOSITE 4 |
#define FB_SIGNAL_SYNC_ON_GREEN 8 |
#define FB_SIGNAL_SERRATION_ON 16 |
#define FB_MISC_PRIM_COLOR 1 |
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ |
/* CIE chromaticity coordinates of the monitor's primaries and white
 * point, each expressed as a fraction of 1024. */
struct fb_chroma {
__u32 redx; /* in fraction of 1024 */
__u32 greenx;
__u32 bluex;
__u32 whitex;
__u32 redy;
__u32 greeny;
__u32 bluey;
__u32 whitey;
};
/* Parsed monitor capabilities, normally filled in from the display's
 * EDID block by fb_edid_to_monspecs(): identity strings, supported
 * frequency ranges, signal/DPMS capabilities and a mode database. */
struct fb_monspecs {
struct fb_chroma chroma;
struct fb_videomode *modedb; /* mode database */
__u8 manufacturer[4]; /* Manufacturer */
__u8 monitor[14]; /* Monitor String */
__u8 serial_no[14]; /* Serial Number */
__u8 ascii[14]; /* ? */
__u32 modedb_len; /* mode database length */
__u32 model; /* Monitor Model */
__u32 serial; /* Serial Number - Integer */
__u32 year; /* Year manufactured */
__u32 week; /* Week Manufactured */
__u32 hfmin; /* hfreq lower limit (Hz) */
__u32 hfmax; /* hfreq upper limit (Hz) */
__u32 dclkmin; /* pixelclock lower limit (Hz) */
__u32 dclkmax; /* pixelclock upper limit (Hz) */
__u16 input; /* display type - see FB_DISP_* */
__u16 dpms; /* DPMS support - see FB_DPMS_ */
__u16 signal; /* Signal Type - see FB_SIGNAL_* */
__u16 vfmin; /* vfreq lower limit (Hz) */
__u16 vfmax; /* vfreq upper limit (Hz) */
__u16 gamma; /* Gamma - in fractions of 100 */
__u16 gtf : 1; /* supports GTF */
__u16 misc; /* Misc flags - see FB_MISC_* */
__u8 version; /* EDID version... */
__u8 revision; /* ...and revision */
__u8 max_x; /* Maximum horizontal size (cm) */
__u8 max_y; /* Maximum vertical size (cm) */
};
/* Userspace-pointer twins of fb_cmap / fb_image / fb_cursor: identical
 * field layout, but every buffer pointer is tagged __user so the data
 * must be copied in/out rather than dereferenced directly. */
struct fb_cmap_user {
__u32 start; /* First entry */
__u32 len; /* Number of entries */
__u16 __user *red; /* Red values */
__u16 __user *green;
__u16 __user *blue;
__u16 __user *transp; /* transparency, can be NULL */
};
struct fb_image_user {
__u32 dx; /* Where to place image */
__u32 dy;
__u32 width; /* Size of image */
__u32 height;
__u32 fg_color; /* Only used when a mono bitmap */
__u32 bg_color;
__u8 depth; /* Depth of the image */
const char __user *data; /* Pointer to image data */
struct fb_cmap_user cmap; /* color map info */
};
struct fb_cursor_user {
__u16 set; /* what to set */
__u16 enable; /* cursor on/off */
__u16 rop; /* bitop operation */
const char __user *mask; /* cursor mask bits */
struct fbcurpos hot; /* cursor hot spot */
struct fb_image_user image; /* Cursor image */
};
/* |
* Register/unregister for framebuffer events |
*/ |
/* The resolution of the passed in fb_info about to change */ |
#define FB_EVENT_MODE_CHANGE 0x01 |
/* The display on this fb_info is being suspended, no access to the
* framebuffer is allowed any more after that call returns |
*/ |
#define FB_EVENT_SUSPEND 0x02 |
/* The display on this fb_info was resumed, you can restore the display |
* if you own it |
*/ |
#define FB_EVENT_RESUME 0x03 |
/* An entry from the modelist was removed */ |
#define FB_EVENT_MODE_DELETE 0x04 |
/* A driver registered itself */ |
#define FB_EVENT_FB_REGISTERED 0x05 |
/* A driver unregistered itself */ |
#define FB_EVENT_FB_UNREGISTERED 0x06 |
/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ |
#define FB_EVENT_GET_CONSOLE_MAP 0x07 |
/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ |
#define FB_EVENT_SET_CONSOLE_MAP 0x08 |
/* A hardware display blank change occurred */
#define FB_EVENT_BLANK 0x09 |
/* Private modelist is to be replaced */ |
#define FB_EVENT_NEW_MODELIST 0x0A |
/* The resolution of the passed in fb_info about to change and |
all vc's should be changed */ |
#define FB_EVENT_MODE_CHANGE_ALL 0x0B |
/* A software display blank change occurred */
#define FB_EVENT_CONBLANK 0x0C |
/* Get drawing requirements */ |
#define FB_EVENT_GET_REQ 0x0D |
/* Unbind from the console if possible */ |
#define FB_EVENT_FB_UNBIND 0x0E |
/* Payload delivered with an FB_EVENT_* notification (see the defines
 * above); 'data' is event-specific. */
struct fb_event {
struct fb_info *info;
void *data;
};
/* Blit capability/requirement description exchanged through
 * fb_ops.fb_get_caps(). */
struct fb_blit_caps {
u32 x;
u32 y;
u32 len;
u32 flags;
};
/* |
* Pixmap structure definition |
* |
* The purpose of this structure is to translate data |
* from the hardware independent format of fbdev to what |
* format the hardware needs. |
*/ |
#define FB_PIXMAP_DEFAULT 1 /* used internally by fbcon */ |
#define FB_PIXMAP_SYSTEM 2 /* memory is in system RAM */ |
#define FB_PIXMAP_IO 4 /* memory is iomapped */ |
#define FB_PIXMAP_SYNC 256 /* set if GPU can DMA */ |
/* Staging buffer used to repack drawing data into the alignment the
 * hardware wants (see the explanatory comment block above).  The
 * writeio/readio hooks move bytes between the buffer and I/O memory. */
struct fb_pixmap {
u8 *addr; /* pointer to memory */
u32 size; /* size of buffer in bytes */
u32 offset; /* current offset to buffer */
u32 buf_align; /* byte alignment of each bitmap */
u32 scan_align; /* alignment per scanline */
u32 access_align; /* alignment per read/write (bits) */
u32 flags; /* see FB_PIXMAP_* */
u32 blit_x; /* supported bit block dimensions (1-32)*/
u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
/* blit_y = 1 << (height - 1) */
/* if 0, will be set to 0xffffffff (all)*/
/* access methods */
void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
};
#ifdef CONFIG_FB_DEFERRED_IO |
struct fb_deferred_io { |
/* delay between mkwrite and deferred handler */ |
unsigned long delay; |
struct mutex lock; /* mutex that protects the page list */ |
struct list_head pagelist; /* list of touched pages */ |
/* callback */ |
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); |
}; |
#endif |
/* |
* Frame buffer operations |
* |
* LOCKING NOTE: those functions must _ALL_ be called with the console |
* semaphore held, this is the only suitable locking mechanism we have |
* in 2.6. Some may be called at interrupt time at this point though. |
*/ |
/* Driver entry points for one framebuffer device.  Hooks without an
 * "optional" note are expected to be implemented (the cfb_*/sys_*
 * generic helpers below can serve as the drawing ones). */
struct fb_ops {
/* open/release and usage marking */
struct module *owner;
int (*fb_open)(struct fb_info *info, int user);
int (*fb_release)(struct fb_info *info, int user);
/* For framebuffers with strange non linear layouts or that do not
* work with normal memory mapped access
*/
ssize_t (*fb_read)(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
ssize_t (*fb_write)(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
/* checks var and eventually tweaks it to something supported,
* DO NOT MODIFY PAR */
int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info);
/* set the video mode according to info->var */
int (*fb_set_par)(struct fb_info *info);
/* set color register */
int (*fb_setcolreg)(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *info);
/* set color registers in batch */
int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info);
/* blank display */
int (*fb_blank)(int blank, struct fb_info *info);
/* pan display */
int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info);
/* Draws a rectangle */
void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
/* Copy data from area to another */
void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
/* Draws an image to the display */
void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
/* Draws cursor */
int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
/* Rotates the display */
void (*fb_rotate)(struct fb_info *info, int angle);
/* wait for blit idle, optional */
int (*fb_sync)(struct fb_info *info);
/* perform fb specific ioctl (optional) */
int (*fb_ioctl)(struct fb_info *info, unsigned int cmd,
unsigned long arg);
/* Handle 32bit compat ioctl (optional) */
int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd,
unsigned long arg);
/* perform fb specific mmap */
/* fb_mmap dropped in this port (no vm_area_struct here): */
// int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);
/* get capability given var */
void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
struct fb_var_screeninfo *var);
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING |
#define FB_TILE_CURSOR_NONE 0 |
#define FB_TILE_CURSOR_UNDERLINE 1 |
#define FB_TILE_CURSOR_LOWER_THIRD 2 |
#define FB_TILE_CURSOR_LOWER_HALF 3 |
#define FB_TILE_CURSOR_TWO_THIRDS 4 |
#define FB_TILE_CURSOR_BLOCK 5 |
/* Tile-blitting interface: the console draws in units of fixed-size
 * character tiles instead of pixels.  fb_tilemap describes the tile
 * glyph data; the fb_tile* argument structs below all measure in tiles. */
struct fb_tilemap {
__u32 width; /* width of each tile in pixels */
__u32 height; /* height of each tile in scanlines */
__u32 depth; /* color depth of each tile */
__u32 length; /* number of tiles in the map */
const __u8 *data; /* actual tile map: a bitmap array, packed
to the nearest byte */
};
struct fb_tilerect {
__u32 sx; /* origin in the x-axis */
__u32 sy; /* origin in the y-axis */
__u32 width; /* number of tiles in the x-axis */
__u32 height; /* number of tiles in the y-axis */
__u32 index; /* what tile to use: index to tile map */
__u32 fg; /* foreground color */
__u32 bg; /* background color */
__u32 rop; /* raster operation */
};
struct fb_tilearea {
__u32 sx; /* source origin in the x-axis */
__u32 sy; /* source origin in the y-axis */
__u32 dx; /* destination origin in the x-axis */
__u32 dy; /* destination origin in the y-axis */
__u32 width; /* number of tiles in the x-axis */
__u32 height; /* number of tiles in the y-axis */
};
struct fb_tileblit {
__u32 sx; /* origin in the x-axis */
__u32 sy; /* origin in the y-axis */
__u32 width; /* number of tiles in the x-axis */
__u32 height; /* number of tiles in the y-axis */
__u32 fg; /* foreground color */
__u32 bg; /* background color */
__u32 length; /* number of tiles to draw */
__u32 *indices; /* array of indices to tile map */
};
struct fb_tilecursor {
__u32 sx; /* cursor position in the x-axis */
__u32 sy; /* cursor position in the y-axis */
__u32 mode; /* 0 = erase, 1 = draw */
__u32 shape; /* see FB_TILE_CURSOR_* */
__u32 fg; /* foreground color */
__u32 bg; /* background color */
};
/* Driver hooks implementing the tile operations above. */
struct fb_tile_ops {
/* set tile characteristics */
void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map);
/* all dimensions from hereon are in terms of tiles */
/* move a rectangular region of tiles from one area to another*/
void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area);
/* fill a rectangular region with a tile */
void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect);
/* copy an array of tiles */
void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit);
/* cursor */
void (*fb_tilecursor)(struct fb_info *info,
struct fb_tilecursor *cursor);
/* get maximum length of the tile map */
int (*fb_get_tilemax)(struct fb_info *info);
};
#endif /* CONFIG_FB_TILEBLITTING */ |
/* FBINFO_* = fb_info.flags bit flags */ |
#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */ |
#define FBINFO_HWACCEL_DISABLED 0x0002 |
/* When FBINFO_HWACCEL_DISABLED is set: |
* Hardware acceleration is turned off. Software implementations |
* of required functions (copyarea(), fillrect(), and imageblit()) |
* takes over; acceleration engine should be in a quiescent state */ |
/* hints */ |
#define FBINFO_PARTIAL_PAN_OK 0x0040 /* otw use pan only for double-buffering */ |
#define FBINFO_READS_FAST 0x0080 /* soft-copy faster than rendering */ |
/* hardware supported ops */ |
/* semantics: when a bit is set, it indicates that the operation is |
* accelerated by hardware. |
* required functions will still work even if the bit is not set. |
* optional functions may not even exist if the flag bit is not set. |
*/ |
#define FBINFO_HWACCEL_NONE 0x0000 |
#define FBINFO_HWACCEL_COPYAREA 0x0100 /* required */ |
#define FBINFO_HWACCEL_FILLRECT 0x0200 /* required */ |
#define FBINFO_HWACCEL_IMAGEBLIT 0x0400 /* required */ |
#define FBINFO_HWACCEL_ROTATE 0x0800 /* optional */ |
#define FBINFO_HWACCEL_XPAN 0x1000 /* optional */ |
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ |
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ |
#define FBINFO_MISC_USEREVENT 0x10000 /* event request |
from userspace */ |
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ |
#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware |
inited framebuffer */ |
/* A driver may set this flag to indicate that it does want a set_par to be |
* called every time when fbcon_switch is executed. The advantage is that with |
* this flag set you can really be sure that set_par is always called before |
 * any of the functions dependent on the correct hardware state or altering
* that state, even if you are using some broken X releases. The disadvantage |
* is that it introduces unwanted delays to every console switch if set_par |
* is slow. It is a good idea to try this flag in the drivers initialization |
* code whenever there is a bug report related to switching between X and the |
* framebuffer console. |
*/ |
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 |
/* |
* Host and GPU endianness differ. |
*/ |
#define FBINFO_FOREIGN_ENDIAN 0x100000 |
/* |
* Big endian math. This is the same flags as above, but with different |
* meaning, it is set by the fb subsystem depending FOREIGN_ENDIAN flag |
* and host endianness. Drivers should not use this flag. |
*/ |
#define FBINFO_BE_MATH 0x100000 |
/* Per-device state of one framebuffer: the current var/fix/monspecs,
 * the driver's fb_ops and the mapped VRAM.  Members commented out with
 * // were present in the Linux original but are unused in this port. */
struct fb_info {
int node;
int flags; /* FBINFO_* bits */
// struct mutex lock; /* Lock for open/release/ioctl funcs */
// struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
// struct work_struct queue; /* Framebuffer event queue */
// struct fb_pixmap pixmap; /* Image hardware mapper */
// struct fb_pixmap sprite; /* Cursor hardware mapper */
// struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
#ifdef CONFIG_FB_BACKLIGHT
/* assigned backlight device */
/* set before framebuffer registration,
remove after unregister */
struct backlight_device *bl_dev;
/* Backlight level curve */
struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
struct fb_deferred_io *fbdefio;
#endif
struct fb_ops *fbops;
// struct device *device; /* This is the parent */
// struct device *dev; /* This is this fb device */
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
#endif
char __iomem *screen_base; /* Virtual address */
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state i.e suspend */
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
/* we need the PCI or similar aperture base/size not
smem_start/size as smem_start may just be an object
allocated inside the aperture so may not actually overlap */
resource_size_t aperture_base;
resource_size_t aperture_size;
};
#ifdef MODULE |
#define FBINFO_DEFAULT FBINFO_MODULE |
#else |
#define FBINFO_DEFAULT 0 |
#endif |
// This will go away |
#define FBINFO_FLAG_MODULE FBINFO_MODULE |
#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT |
/* This will go away |
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags |
* when it wants to turn the acceleration engine on. This is |
* really a separate operation, and should be modified via sysfs. |
* But for now, we leave it broken with the following define |
*/ |
#define STUPID_ACCELF_TEXT_SHIT |
#define fb_readb(addr) (*(volatile u8 *) (addr)) |
#define fb_readw(addr) (*(volatile u16 *) (addr)) |
#define fb_readl(addr) (*(volatile u32 *) (addr)) |
#define fb_readq(addr) (*(volatile u64 *) (addr)) |
#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b)) |
#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b)) |
#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b)) |
#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b)) |
#define fb_memset memset |
#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0) |
#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \ |
(val) << (bits)) |
#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? (val) << (bits) : \ |
(val) >> (bits)) |
/* |
* `Generic' versions of the frame buffer device operations |
*/ |
extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); |
extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); |
extern int fb_blank(struct fb_info *info, int blank); |
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); |
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); |
/* |
* Drawing operations where framebuffer is in system RAM |
*/ |
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); |
extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); |
extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf, |
size_t count, loff_t *ppos); |
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, |
size_t count, loff_t *ppos); |
/* drivers/video/fbmem.c */ |
extern int register_framebuffer(struct fb_info *fb_info); |
extern int unregister_framebuffer(struct fb_info *fb_info); |
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
extern int fb_show_logo(struct fb_info *fb_info, int rotate); |
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); |
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, |
u32 height, u32 shift_high, u32 shift_low, u32 mod); |
extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); |
extern void fb_set_suspend(struct fb_info *info, int state); |
extern int fb_get_color_depth(struct fb_var_screeninfo *var, |
struct fb_fix_screeninfo *fix); |
extern int fb_get_options(char *name, char **option); |
extern int fb_new_modelist(struct fb_info *info); |
extern struct fb_info *registered_fb[FB_MAX]; |
extern int num_registered_fb; |
extern struct class *fb_class; |
extern int lock_fb_info(struct fb_info *info); |
/* No-op in this port: the per-fb_info mutex is compiled out (see the
 * commented-out 'lock' member of struct fb_info), so there is nothing
 * to release.  Kept so lock_fb_info()/unlock_fb_info() call pairs in
 * callers need no changes. */
static inline void unlock_fb_info(struct fb_info *info)
{
// mutex_unlock(&info->lock);
}
/*
 * Copy 'height' rows of 's_pitch' bytes each from the tightly packed
 * 'src' buffer into 'dst', whose rows are 'd_pitch' bytes apart, leaving
 * the (d_pitch - s_pitch) trailing padding bytes of every destination
 * row untouched.  The byte-wise inner copy is deliberate: s_pitch is a
 * few bytes at the most, so memcpy's call overhead would dominate.
 */
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
u8 *src, u32 s_pitch, u32 height)
{
u32 row, col;
u32 row_pad = d_pitch - s_pitch; /* bytes to skip after each row */

for (row = 0; row < height; row++) {
for (col = 0; col < s_pitch; col++)
*dst++ = *src++;
dst += row_pad;
}
}
/* drivers/video/fb_defio.c */ |
/*
 * fb_be_math - should drawing helpers treat framebuffer words as
 * big-endian for this device?
 *
 * With CONFIG_FB_FOREIGN_ENDIAN the answer is either fixed at build
 * time (BIG/LITTLE variants) or, when both endiannesses are possible,
 * taken from the per-device FBINFO_BE_MATH flag.  Without that option
 * the host byte order decides.
 */
static inline bool fb_be_math(struct fb_info *info)
{
#ifdef CONFIG_FB_FOREIGN_ENDIAN
#if defined(CONFIG_FB_BOTH_ENDIAN)
return info->flags & FBINFO_BE_MATH;
#elif defined(CONFIG_FB_BIG_ENDIAN)
return true;
#elif defined(CONFIG_FB_LITTLE_ENDIAN)
return false;
#endif /* CONFIG_FB_BOTH_ENDIAN */
#else
#ifdef __BIG_ENDIAN
return true;
#else
return false;
#endif /* __BIG_ENDIAN */
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
/* drivers/video/fbsysfs.c */ |
//extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); |
//extern void framebuffer_release(struct fb_info *info); |
//extern int fb_init_device(struct fb_info *fb_info); |
//extern void fb_cleanup_device(struct fb_info *head); |
//extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max); |
/* drivers/video/fbmon.c */ |
#define FB_MAXTIMINGS 0 |
#define FB_VSYNCTIMINGS 1 |
#define FB_HSYNCTIMINGS 2 |
#define FB_DCLKTIMINGS 3 |
#define FB_IGNOREMON 0x100 |
#define FB_MODE_IS_UNKNOWN 0 |
#define FB_MODE_IS_DETAILED 1 |
#define FB_MODE_IS_STANDARD 2 |
#define FB_MODE_IS_VESA 4 |
#define FB_MODE_IS_CALCULATED 8 |
#define FB_MODE_IS_FIRST 16 |
#define FB_MODE_IS_FROM_VAR 32 |
extern int fbmon_dpms(const struct fb_info *fb_info); |
extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, |
struct fb_info *info); |
extern int fb_validate_mode(const struct fb_var_screeninfo *var, |
struct fb_info *info); |
extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); |
//extern const unsigned char *fb_firmware_edid(struct device *device); |
extern void fb_edid_to_monspecs(unsigned char *edid, |
struct fb_monspecs *specs); |
extern void fb_destroy_modedb(struct fb_videomode *modedb); |
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); |
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); |
/* drivers/video/modedb.c */ |
#define VESA_MODEDB_SIZE 34 |
extern void fb_var_to_videomode(struct fb_videomode *mode, |
const struct fb_var_screeninfo *var); |
extern void fb_videomode_to_var(struct fb_var_screeninfo *var, |
const struct fb_videomode *mode); |
extern int fb_mode_is_equal(const struct fb_videomode *mode1, |
const struct fb_videomode *mode2); |
extern int fb_add_videomode(const struct fb_videomode *mode, |
struct list_head *head); |
extern void fb_delete_videomode(const struct fb_videomode *mode, |
struct list_head *head); |
extern const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var, |
struct list_head *head); |
extern const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var, |
struct list_head *head); |
extern const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, |
struct list_head *head); |
extern void fb_destroy_modelist(struct list_head *head); |
extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num, |
struct list_head *head); |
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs, |
struct list_head *head); |
/* drivers/video/fbcmap.c */ |
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); |
extern void fb_dealloc_cmap(struct fb_cmap *cmap); |
extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); |
extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); |
extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info); |
extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info); |
extern const struct fb_cmap *fb_default_cmap(int len); |
extern void fb_invert_cmaps(void); |
struct fb_videomode { |
const char *name; /* optional */ |
u32 refresh; /* optional */ |
u32 xres; |
u32 yres; |
u32 pixclock; |
u32 left_margin; |
u32 right_margin; |
u32 upper_margin; |
u32 lower_margin; |
u32 hsync_len; |
u32 vsync_len; |
u32 sync; |
u32 vmode; |
u32 flag; |
}; |
extern const char *fb_mode_option; |
extern const struct fb_videomode vesa_modes[]; |
struct fb_modelist { |
struct list_head list; |
struct fb_videomode mode; |
}; |
extern int fb_find_mode(struct fb_var_screeninfo *var, |
struct fb_info *info, const char *mode_option, |
const struct fb_videomode *db, |
unsigned int dbsize, |
const struct fb_videomode *default_mode, |
unsigned int default_bpp); |
#endif /* _LINUX_FB_H */ |
/drivers/include/linux/firmware.h |
---|
0,0 → 1,65 |
#ifndef _LINUX_FIRMWARE_H |
#define _LINUX_FIRMWARE_H |
#include <linux/module.h> |
#include <linux/types.h> |
//#include <linux/compiler.h> |
#define FW_ACTION_NOHOTPLUG 0 |
#define FW_ACTION_HOTPLUG 1 |
/* A firmware image as handed to drivers via request_firmware(). */
struct firmware {
size_t size; /* length of @data in bytes */
const u8 *data; /* firmware image bytes, read-only */
};
struct device; |
/*
 * One firmware blob linked into the kernel image; DECLARE_BUILTIN_FIRMWARE_SIZE
 * below places a {name, blob, size} record in the .builtin_fw section.
 */
struct builtin_fw {
char *name; /* name the blob is registered under */
void *data; /* blob contents */
unsigned long size; /* blob length in bytes */
};
/* We have to play tricks here much like stringify() to get the |
__COUNTER__ macro to be expanded as we want it */ |
#define __fw_concat1(x, y) x##y |
#define __fw_concat(x, y) __fw_concat1(x, y) |
#define DECLARE_BUILTIN_FIRMWARE(name, blob) \ |
DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) |
#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ |
static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ |
__used __section(.builtin_fw) = { name, blob, size } |
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) |
int request_firmware(const struct firmware **fw, const char *name, |
struct device *device); |
int request_firmware_nowait( |
struct module *module, int uevent, |
const char *name, struct device *device, void *context, |
void (*cont)(const struct firmware *fw, void *context)); |
void release_firmware(const struct firmware *fw); |
#else |
/*
 * Firmware loader not configured (no CONFIG_FW_LOADER): every request
 * fails with -EINVAL and *fw is left untouched.
 */
static inline int request_firmware(const struct firmware **fw,
const char *name,
struct device *device)
{
return -EINVAL;
}
/*
 * Async variant of the disabled loader: fails immediately with -EINVAL;
 * the completion callback @cont is never invoked.
 */
static inline int request_firmware_nowait(
struct module *module, int uevent,
const char *name, struct device *device, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
return -EINVAL;
}
/*
 * No-op: with the loader disabled, request_firmware() never hands out a
 * firmware object, so there is nothing to release.
 */
static inline void release_firmware(const struct firmware *fw)
{
}
#endif |
#endif |
/drivers/include/linux/i2c-algo-bit.h |
---|
0,0 → 1,51 |
/* ------------------------------------------------------------------------- */ |
/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-99 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even |
Frodo Looijaard <frodol@dds.nl> */ |
#ifndef _LINUX_I2C_ALGO_BIT_H |
#define _LINUX_I2C_ALGO_BIT_H |
/* --- Defines for bit-adapters --------------------------------------- */ |
/* |
* This struct contains the hw-dependent functions of bit-style adapters to |
* manipulate the line states, and to init any hw-specific features. This is |
* only used if you have more than one hw-type of adapter running. |
*/ |
struct i2c_algo_bit_data { |
void *data; /* private data for lowlevel routines */ |
void (*setsda) (void *data, int state); |
void (*setscl) (void *data, int state); |
int (*getsda) (void *data); |
int (*getscl) (void *data); |
/* local settings */ |
int udelay; /* half clock cycle time in us, |
minimum 2 us for fast-mode I2C, |
minimum 5 us for standard-mode I2C and SMBus, |
maximum 50 us for SMBus */ |
int timeout; /* in jiffies */ |
}; |
int i2c_bit_add_bus(struct i2c_adapter *); |
int i2c_bit_add_numbered_bus(struct i2c_adapter *); |
#endif /* _LINUX_I2C_ALGO_BIT_H */ |
/drivers/include/linux/i2c-id.h |
---|
0,0 → 1,59 |
/* ------------------------------------------------------------------------- */ |
/* */ |
/* i2c-id.h - identifier values for i2c drivers and adapters */ |
/* */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-1999 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
#ifndef LINUX_I2C_ID_H |
#define LINUX_I2C_ID_H |
/* Please note that I2C driver IDs are optional. They are only needed if a |
legacy chip driver needs to identify a bus or a bus driver needs to |
identify a legacy client. If you don't need them, just don't set them. */ |
/* |
* ---- Adapter types ---------------------------------------------------- |
*/ |
/* --- Bit algorithm adapters */ |
#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ |
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ |
#define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ |
#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ |
#define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */ |
#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ |
#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ |
#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ |
#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ |
#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ |
/* --- SGI adapters */ |
#define I2C_HW_SGI_VINO 0x160000 |
/* --- SMBus only adapters */ |
#define I2C_HW_SMBUS_W9968CF 0x04000d |
#define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ |
#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ |
#define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ |
/* --- Miscellaneous adapters */ |
#define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */ |
#define I2C_HW_SAA7134 0x090000 /* SAA7134 video decoder bus */ |
#endif /* LINUX_I2C_ID_H */ |
/drivers/include/linux/i2c.h |
---|
0,0 → 1,299 |
/* ------------------------------------------------------------------------- */ |
/* */ |
/* i2c.h - definitions for the i2c-bus interface */ |
/* */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-2000 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and |
Frodo Looijaard <frodol@dds.nl> */ |
#ifndef _LINUX_I2C_H |
#define _LINUX_I2C_H |
#include <types.h> |
#include <list.h> |
#define I2C_NAME_SIZE 20 |
struct i2c_msg; |
struct i2c_algorithm; |
struct i2c_adapter; |
struct i2c_client; |
union i2c_smbus_data; |
/* Transfer num messages. |
*/ |
extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, |
int num); |
/** |
* struct i2c_client - represent an I2C slave device |
* @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; |
* I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking |
* @addr: Address used on the I2C bus connected to the parent adapter. |
* @name: Indicates the type of the device, usually a chip name that's |
* generic enough to hide second-sourcing and compatible revisions. |
* @adapter: manages the bus segment hosting this I2C device |
* @driver: device's driver, hence pointer to access routines |
* @dev: Driver model device node for the slave. |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
* userspace_devices list |
* |
* An i2c_client identifies a single device (i.e. chip) connected to an |
* i2c bus. The behaviour exposed to Linux is defined by the driver |
* managing the device. |
*/ |
struct i2c_client { |
unsigned short flags; /* div., see below */ |
unsigned short addr; /* chip address - NOTE: 7bit */ |
/* addresses are stored in the */ |
/* _LOWER_ 7 bits */ |
char name[I2C_NAME_SIZE]; |
struct i2c_adapter *adapter; /* the adapter we sit on */ |
// struct i2c_driver *driver; /* and our access routines */ |
// struct device dev; /* the device structure */ |
int irq; /* irq issued by device (or -1) */ |
struct list_head detected; |
}; |
#define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
/* |
* The following structs are for those who like to implement new bus drivers: |
* i2c_algorithm is the interface to a class of hardware solutions which can |
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 |
* to name two of the most common. |
*/ |
struct i2c_algorithm { |
/* If an adapter algorithm can't do I2C-level access, set master_xfer |
to NULL. If an adapter algorithm can do SMBus access, set |
smbus_xfer. If set to NULL, the SMBus protocol is simulated |
using common I2C messages */ |
/* master_xfer should return the number of messages successfully |
processed, or a negative value on error */ |
int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, |
int num); |
int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, |
unsigned short flags, char read_write, |
u8 command, int size, union i2c_smbus_data *data); |
/* To determine what the adapter supports */ |
u32 (*functionality) (struct i2c_adapter *); |
}; |
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
*/ |
/*
 * i2c_adapter - one physical i2c bus plus the algorithm used to drive it.
 * (The embedded struct device of the Linux original is compiled out here.)
 */
struct i2c_adapter {
unsigned int id; /* adapter id -- presumably an I2C_HW_* value, confirm */
unsigned int class; /* classes to allow probing for */
const struct i2c_algorithm *algo; /* the algorithm to access the bus */
void *algo_data; /* private data for @algo's callbacks */
/* data fields that are valid for all devices */
u8 level; /* nesting level for lockdep */
int timeout; /* in jiffies */
int retries; /* transfer retry count -- TODO confirm semantics */
// struct device dev; /* the adapter device */
int nr; /* adapter number -- presumably the bus index */
char name[48]; /* human-readable adapter name */
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) |
/*flags for the client struct: */ |
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ |
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ |
/* Must equal I2C_M_TEN below */ |
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ |
/* i2c adapter classes (bitmask) */ |
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ |
#define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */ |
#define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */ |
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ |
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ |
/* i2c_client_address_data is the struct for holding default client |
* addresses for a driver and for the parameters supplied on the |
* command line |
*/ |
struct i2c_client_address_data {
const unsigned short *normal_i2c; /* I2C_CLIENT_END-terminated list of default chip addresses */
const unsigned short *probe; /* addresses to probe -- terminated like @normal_i2c */
const unsigned short *ignore; /* addresses to skip -- terminated like @normal_i2c */
const unsigned short * const *forces; /* forced addresses -- TODO confirm per-bus layout */
};
/* Internal numbers to terminate lists */ |
#define I2C_CLIENT_END 0xfffeU |
/* The numbers to use to set I2C bus address */ |
#define ANY_I2C_BUS 0xffff |
/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ |
#define I2C_ADDRS(addr, addrs...) \ |
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) |
/** |
* struct i2c_msg - an I2C transaction segment beginning with START |
* @addr: Slave address, either seven or ten bits. When this is a ten |
* bit address, I2C_M_TEN must be set in @flags and the adapter |
* must support I2C_FUNC_10BIT_ADDR. |
* @flags: I2C_M_RD is handled by all adapters. No other flags may be |
* provided unless the adapter exported the relevant I2C_FUNC_* |
* flags through i2c_check_functionality(). |
* @len: Number of data bytes in @buf being read from or written to the |
* I2C slave address. For read transactions where I2C_M_RECV_LEN |
* is set, the caller guarantees that this buffer can hold up to |
* 32 bytes in addition to the initial length byte sent by the |
* slave (plus, if used, the SMBus PEC); and this value will be |
* incremented by the number of block data bytes received. |
* @buf: The buffer into which data is read, or from which it's written. |
* |
* An i2c_msg is the low level representation of one segment of an I2C |
* transaction. It is visible to drivers in the @i2c_transfer() procedure, |
* to userspace from i2c-dev, and to I2C adapter drivers through the |
* @i2c_adapter.@master_xfer() method. |
* |
* Except when I2C "protocol mangling" is used, all I2C adapters implement |
* the standard rules for I2C transactions. Each transaction begins with a |
* START. That is followed by the slave address, and a bit encoding read |
* versus write. Then follow all the data bytes, possibly including a byte |
* with SMBus PEC. The transfer terminates with a NAK, or when all those |
* bytes have been transferred and ACKed. If this is the last message in a |
* group, it is followed by a STOP. Otherwise it is followed by the next |
* @i2c_msg transaction segment, beginning with a (repeated) START. |
* |
* Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then |
* passing certain @flags may have changed those standard protocol behaviors. |
* Those flags are only for use with broken/nonconforming slaves, and with |
* adapters which are known to support the specific mangling options they |
* need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR). |
*/ |
struct i2c_msg { |
u16 addr; /* slave address */ |
u16 flags; |
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ |
#define I2C_M_RD 0x0001 /* read data, from slave to master */ |
#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ |
u16 len; /* msg length */ |
u8 *buf; /* pointer to msg data */ |
}; |
/* To determine what functionality is present */ |
#define I2C_FUNC_I2C 0x00000001 |
#define I2C_FUNC_10BIT_ADDR 0x00000002 |
#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */ |
#define I2C_FUNC_SMBUS_PEC 0x00000008 |
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ |
#define I2C_FUNC_SMBUS_QUICK 0x00010000 |
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 |
#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 |
#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 |
#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 |
#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 |
#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 |
#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 |
#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 |
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 |
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ |
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ |
#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ |
I2C_FUNC_SMBUS_WRITE_BYTE) |
#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \ |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA) |
#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \ |
I2C_FUNC_SMBUS_WRITE_WORD_DATA) |
#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) |
#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \ |
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) |
#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \ |
I2C_FUNC_SMBUS_BYTE | \ |
I2C_FUNC_SMBUS_BYTE_DATA | \ |
I2C_FUNC_SMBUS_WORD_DATA | \ |
I2C_FUNC_SMBUS_PROC_CALL | \ |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \ |
I2C_FUNC_SMBUS_I2C_BLOCK | \ |
I2C_FUNC_SMBUS_PEC) |
/* |
* Data for SMBus Messages |
*/ |
#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */ |
union i2c_smbus_data { |
__u8 byte; |
__u16 word; |
__u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */ |
/* and one more for user-space compatibility */ |
}; |
/* i2c_smbus_xfer read or write markers */ |
#define I2C_SMBUS_READ 1 |
#define I2C_SMBUS_WRITE 0 |
/* SMBus transaction types (size parameter in the above functions) |
Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */ |
#define I2C_SMBUS_QUICK 0 |
#define I2C_SMBUS_BYTE 1 |
#define I2C_SMBUS_BYTE_DATA 2 |
#define I2C_SMBUS_WORD_DATA 3 |
#define I2C_SMBUS_PROC_CALL 4 |
#define I2C_SMBUS_BLOCK_DATA 5 |
#define I2C_SMBUS_I2C_BLOCK_BROKEN 6 |
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ |
#define I2C_SMBUS_I2C_BLOCK_DATA 8 |
#endif /* _LINUX_I2C_H */ |
/drivers/include/linux/idr.h |
---|
0,0 → 1,144 |
/* |
* include/linux/idr.h |
* |
* 2002-10-18 written by Jim Houston jim.houston@ccur.com |
* Copyright (C) 2002 by Concurrent Computer Corporation |
* Distributed under the GNU GPL license version 2. |
* |
* Small id to pointer translation service avoiding fixed sized |
* tables. |
*/ |
#ifndef __IDR_H__ |
#define __IDR_H__ |
#include <types.h> |
#include <errno-base.h> |
//#include <linux/bitops.h> |
//#include <linux/init.h> |
//#include <linux/rcupdate.h> |
/*
 * Local stand-in for the Linux rcu_head (linux/rcupdate.h is commented out
 * above).  It is embedded in struct idr_layer below so the ported idr code
 * compiles unchanged.
 */
struct rcu_head {
struct rcu_head *next; /* next head in a callback list -- TODO confirm use */
void (*func)(struct rcu_head *head); /* callback receiving this head */
};
# define IDR_BITS 5 |
# define IDR_FULL 0xfffffffful |
/* We can only use two of the bits in the top level because there is |
only one possible bit in the top level (5 bits * 7 levels = 35 |
bits, but you only use 31 bits in the id). */ |
# define TOP_LEVEL_FULL (IDR_FULL >> 30) |
#define IDR_SIZE (1 << IDR_BITS) |
#define IDR_MASK ((1 << IDR_BITS)-1) |
#define MAX_ID_SHIFT (sizeof(int)*8 - 1) |
#define MAX_ID_BIT (1U << MAX_ID_SHIFT) |
#define MAX_ID_MASK (MAX_ID_BIT - 1) |
/* Leave the possibility of an incomplete final layer */ |
#define MAX_LEVEL (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS |
/* Number of id_layer structs to leave in free list */ |
#define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL |
/* One radix-tree node of an idr: 1<<IDR_BITS child slots per level. */
struct idr_layer {
unsigned long bitmap; /* A zero bit means "space here" */
struct idr_layer *ary[1<<IDR_BITS]; /* children; presumably user pointers at the leaf level */
int count; /* When zero, we can release it */
int layer; /* distance from leaf */
struct rcu_head rcu_head; /* for deferred freeing -- see rcu_head stub above */
};
/* Root of an id-to-pointer map; operated on by the idr_* functions below. */
struct idr {
struct idr_layer *top; /* root node of the layer tree, NULL when empty */
struct idr_layer *id_free; /* presumably layers preallocated by idr_pre_get() -- confirm */
int layers; /* only valid without concurrent changes */
int id_free_cnt; /* number of layers on the @id_free list */
// spinlock_t lock; /* locking disabled in this port */
};
/*
 * IDR_INIT - static initializer for a struct idr.
 *
 * The disabled .lock initializer must be a block comment: line splicing is
 * performed before comment removal in C, so a "//" comment ending in a
 * backslash-continuation would absorb the next source line — here the
 * closing brace — truncating every expansion of IDR_INIT()/IDA_INIT().
 */
#define IDR_INIT(name) \
{ \
	.top = NULL, \
	.id_free = NULL, \
	.layers = 0, \
	.id_free_cnt = 0, \
	/* .lock = __SPIN_LOCK_UNLOCKED(name.lock), */ \
}
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) |
/* Actions to be taken after a call to _idr_sub_alloc */ |
#define IDR_NEED_TO_GROW -2 |
#define IDR_NOMORE_SPACE -3 |
#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC) |
/** |
* idr synchronization (stolen from radix-tree.h) |
* |
* idr_find() is able to be called locklessly, using RCU. The caller must |
* ensure calls to this function are made within rcu_read_lock() regions. |
* Other readers (lock-free or otherwise) and modifications may be running |
* concurrently. |
* |
* It is still required that the caller manage the synchronization and |
* lifetimes of the items. So if RCU lock-free lookups are used, typically |
* this would mean that the items have their own locks, or are amenable to |
* lock-free access; and that the items are freed by RCU (or only freed after |
* having been deleted from the idr tree *and* a synchronize_rcu() grace |
* period). |
*/ |
/* |
* This is what we export. |
*/ |
void *idr_find(struct idr *idp, int id); |
int idr_pre_get(struct idr *idp, u32_t gfp_mask); |
int idr_get_new(struct idr *idp, void *ptr, int *id); |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
int idr_for_each(struct idr *idp, |
int (*fn)(int id, void *p, void *data), void *data); |
void *idr_get_next(struct idr *idp, int *nextid); |
void *idr_replace(struct idr *idp, void *ptr, int id); |
void idr_remove(struct idr *idp, int id); |
void idr_remove_all(struct idr *idp); |
void idr_destroy(struct idr *idp); |
void idr_init(struct idr *idp); |
/* |
* IDA - IDR based id allocator, use when translation from id to |
* pointer isn't necessary. |
*/ |
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ |
#define IDA_BITMAP_LONGS (128 / sizeof(long) - 1) |
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) |
/* Bitmap leaf used by the ida allocator; sized to fit IDA_CHUNK_SIZE bytes. */
struct ida_bitmap {
long nr_busy; /* presumably the number of allocated bits in @bitmap -- confirm */
unsigned long bitmap[IDA_BITMAP_LONGS];
};
/* ID allocator built on idr; use when no id-to-pointer mapping is needed. */
struct ida {
struct idr idr; /* backing idr */
struct ida_bitmap *free_bitmap; /* spare bitmap held for allocation -- TODO confirm */
};
#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } |
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) |
int ida_pre_get(struct ida *ida, u32_t gfp_mask); |
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); |
int ida_get_new(struct ida *ida, int *p_id); |
void ida_remove(struct ida *ida, int id); |
void ida_destroy(struct ida *ida); |
void ida_init(struct ida *ida); |
void idr_init_cache(void); |
#endif /* __IDR_H__ */ |
/drivers/include/linux/kernel.h |
---|
0,0 → 1,140 |
#ifndef _LINUX_KERNEL_H |
#define _LINUX_KERNEL_H |
/* |
* 'kernel.h' contains some often-used function prototypes etc |
*/ |
#ifdef __KERNEL__ |
#include <stdarg.h> |
#include <linux/stddef.h> |
#include <linux/types.h> |
#include <linux/compiler.h> |
#define USHORT_MAX ((u16)(~0U)) |
#define SHORT_MAX ((s16)(USHORT_MAX>>1)) |
#define SHORT_MIN (-SHORT_MAX - 1) |
#define INT_MAX ((int)(~0U>>1)) |
#define INT_MIN (-INT_MAX - 1) |
#define UINT_MAX (~0U) |
#define LONG_MAX ((long)(~0UL>>1)) |
#define LONG_MIN (-LONG_MAX - 1) |
#define ULONG_MAX (~0UL) |
#define LLONG_MAX ((long long)(~0ULL>>1)) |
#define LLONG_MIN (-LLONG_MAX - 1) |
#define ULLONG_MAX (~0ULL) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
/** |
* upper_32_bits - return bits 32-63 of a number |
* @n: the number we're accessing |
* |
* A basic shift-right of a 64- or 32-bit quantity. Use this to suppress |
* the "right shift count >= width of type" warning when that quantity is |
* 32-bits. |
*/ |
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) |
/** |
* lower_32_bits - return bits 0-31 of a number |
* @n: the number we're accessing |
*/ |
#define lower_32_bits(n) ((u32)(n)) |
#define KERN_EMERG "<0>" /* system is unusable */ |
#define KERN_ALERT "<1>" /* action must be taken immediately */ |
#define KERN_CRIT "<2>" /* critical conditions */ |
#define KERN_ERR "<3>" /* error conditions */ |
#define KERN_WARNING "<4>" /* warning conditions */ |
#define KERN_NOTICE "<5>" /* normal but significant condition */ |
#define KERN_INFO "<6>" /* informational */ |
#define KERN_DEBUG "<7>" /* debug-level messages */ |
//int printk(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
/* |
* min()/max()/clamp() macros that also do |
* strict type-checking.. See the |
* "unnecessary" pointer comparison. |
*/ |
#define min(x, y) ({ \ |
typeof(x) _min1 = (x); \ |
typeof(y) _min2 = (y); \ |
(void) (&_min1 == &_min2); \ |
_min1 < _min2 ? _min1 : _min2; }) |
#define max(x, y) ({ \ |
typeof(x) _max1 = (x); \ |
typeof(y) _max2 = (y); \ |
(void) (&_max1 == &_max2); \ |
_max1 > _max2 ? _max1 : _max2; }) |
/* |
* ..and if you can't take the strict |
* types, you can specify one yourself. |
* |
* Or not use min/max/clamp at all, of course. |
*/ |
#define min_t(type, x, y) ({ \ |
type __min1 = (x); \ |
type __min2 = (y); \ |
__min1 < __min2 ? __min1: __min2; }) |
#define max_t(type, x, y) ({ \ |
type __max1 = (x); \ |
type __max2 = (y); \ |
__max1 > __max2 ? __max1: __max2; }) |
/** |
* container_of - cast a member of a structure out to the containing structure |
* @ptr: the pointer to the member. |
* @type: the type of the container struct this is embedded in. |
* @member: the name of the member within the struct. |
* |
*/ |
#define container_of(ptr, type, member) ({ \ |
const typeof( ((type *)0)->member ) *__mptr = (ptr); \ |
(type *)( (char *)__mptr - offsetof(type,member) );}) |
/*
 * kcalloc - allocate zeroed memory for an array of @n elements of @size bytes.
 *
 * Returns NULL when n * size would overflow an unsigned long, otherwise a
 * zeroed buffer from kzalloc().  Fix: the @flags argument is now forwarded
 * to kzalloc() instead of being silently replaced with 0, matching the
 * declared interface (callers that passed 0 are unaffected).
 */
static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;	/* n * size would overflow */
	return kzalloc(n * size, flags);
}
#endif /* __KERNEL__ */ |
/* Page-protection types, kept for source compatibility with ported mm code. */
typedef unsigned long pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
/* Empty placeholders: ported code needs these as named types, presumably
   only through pointers -- verify before adding members. */
struct file {};
struct vm_area_struct {};
struct address_space {};
#define preempt_disable() do { } while (0) |
#define preempt_enable_no_resched() do { } while (0) |
#define preempt_enable() do { } while (0) |
#define preempt_check_resched() do { } while (0) |
#define preempt_disable_notrace() do { } while (0) |
#define preempt_enable_no_resched_notrace() do { } while (0) |
#define preempt_enable_notrace() do { } while (0) |
void free (void *ptr); |
#endif |
/drivers/include/linux/kref.h |
---|
0,0 → 1,29 |
/* |
* kref.c - library routines for handling generic reference counted objects |
* |
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> |
* Copyright (C) 2004 IBM Corp. |
* |
* based on kobject.h which was: |
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> |
* Copyright (C) 2002-2003 Open Source Development Labs |
* |
* This file is released under the GPLv2. |
* |
*/ |
#ifndef _KREF_H_ |
#define _KREF_H_ |
#include <linux/types.h> |
/* Generic reference count embedded in other objects; see kref_* below. */
struct kref {
atomic_t refcount; /* current reference count, driven by the kref_* helpers */
};
void kref_set(struct kref *kref, int num); |
void kref_init(struct kref *kref); |
void kref_get(struct kref *kref); |
int kref_put(struct kref *kref, void (*release) (struct kref *kref)); |
#endif /* _KREF_H_ */ |
/drivers/include/linux/list.h |
---|
0,0 → 1,700 |
#ifndef _LINUX_LIST_H |
#define _LINUX_LIST_H |
#include <linux/stddef.h> |
//#include <linux/poison.h> |
//#include <linux/prefetch.h> |
//#include <asm/system.h> |
/* |
* Simple doubly linked list implementation. |
* |
* Some of the internal functions ("__xxx") are useful when |
* manipulating whole lists rather than single entries, as |
* sometimes we already know the next/prev entries and we can |
* generate better code by using them directly rather than |
* using the generic single-entry routines. |
*/ |
#define LIST_POISON1 ((struct list_head*)0xFFFF0100) |
#define LIST_POISON2 ((struct list_head*)0xFFFF0200) |
#define prefetch(x) __builtin_prefetch(x) |
/* Node/head of a circular doubly linked list; embed it in your own struct. */
struct list_head {
struct list_head *next, *prev;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) } |
#define LIST_HEAD(name) \ |
struct list_head name = LIST_HEAD_INIT(name) |
static inline void INIT_LIST_HEAD(struct list_head *list) |
{ |
list->next = list; |
list->prev = list; |
} |
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
/* Debug builds get a checking out-of-line version instead. */
extern void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next);
#endif
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}
/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}
/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Poison the unlinked entry so any later traversal through it faults. */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 * Note: @old's own pointers are left untouched (not reinitialised).
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
/**
 * list_replace_init - replace old entry by new one and reinitialise @old
 * @old : the element to be replaced (left as an empty list)
 * @new : the new element to insert
 */
static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Unlike list_del(), the entry stays usable as an empty list. */
	INIT_LIST_HEAD(entry);
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}
/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
				const struct list_head *head)
{
	return list->next == head;
}
/** |
* list_empty - tests whether a list is empty |
* @head: the list to test. |
*/ |
static inline int list_empty(const struct list_head *head) |
{ |
return head->next == head; |
} |
/** |
* list_empty_careful - tests whether a list is empty and not being modified |
* @head: the list to test |
* |
* Description: |
* tests whether a list is empty _and_ checks that no other CPU might be |
* in the process of modifying either member (next or prev) |
* |
* NOTE: using list_empty_careful() without synchronization |
* can only be safe if the only activity that can happen |
* to the list entry is list_del_init(). Eg. it cannot be used |
* if another CPU could re-list_add() it. |
*/ |
static inline int list_empty_careful(const struct list_head *head) |
{ |
struct list_head *next = head->next; |
return (next == head) && (next == head->prev); |
} |
/** |
* list_is_singular - tests whether a list has just one entry. |
* @head: the list to test. |
*/ |
static inline int list_is_singular(const struct list_head *head) |
{ |
return !list_empty(head) && (head->next == head->prev); |
} |
/*
 * Internal helper for list_cut_position(): move head..entry onto @list.
 * Caller guarantees @entry is a member of @head and @head is non-empty.
 */
static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}
/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (list_empty(head))
		return;
	/* Reject an @entry that a singular @head cannot contain. */
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}
/*
 * Internal helper: splice @list's entries between two known neighbours
 * @prev and @next.  Caller must ensure @list is non-empty.
 */
static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	first->prev = prev;
	prev->next = first;
	last->next = next;
	next->prev = last;
}
/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Note: @list itself is left in an undefined state; use
 * list_splice_init() if it will be reused.
 */
static inline void list_splice(const struct list_head *list,
				struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head, head->next);
}
/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
				struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head->prev, head);
}
/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}
/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}
/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
/**
 * list_first_entry - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)
/**
 * list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * The comma operator in the condition prefetches the next node each
 * iteration without affecting the loop test.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)
/**
 * __list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
/**
 * list_for_each_prev	-	iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 *
 * "Safe" because @n caches the successor before the body can free @pos.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)
/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     prefetch(pos->prev), pos != (head); \
	     pos = n, n = pos->prev)
/**
 * list_for_each_entry	-	iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head); 	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head); 	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 * (Uses the GNU "x ? : y" extension: a NULL @pos falls back to the head.)
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))
/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member)		\
	for (pos = list_entry(pos->member.prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head);	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) 			\
	for (; prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_continue
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) 	\
	for (pos = list_entry(pos->member.next, typeof(*pos), member), 	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_from
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) 		\
	for (n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_reverse
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
		n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
struct hlist_head {
	struct hlist_node *first;
};
struct hlist_node {
	struct hlist_node *next, **pprev;	/* pprev points at whatever points at us */
};
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}
/* True while the node is not linked into any hash chain. */
static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}
static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}
/* Unlink @n; works for any position because pprev covers the head case. */
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison so any stale traversal through @n faults. */
	n->next = (struct hlist_node*)LIST_POISON1;
	n->pprev = (struct hlist_node**)LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}
/* Insert @next immediately after @n (note the argument order). */
static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;
	if(next->next)
		next->next->pprev  = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)
/* Safe variant: @n caches the successor before the body may free @pos. */
#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)
/**
 * hlist_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
#endif
/drivers/include/linux/list_sort.h |
---|
0,0 → 1,11 |
#ifndef _LINUX_LIST_SORT_H
#define _LINUX_LIST_SORT_H
#include <linux/types.h>
struct list_head;
/*
 * Sort a list using the comparison callback @cmp.  @priv is an opaque
 * caller cookie passed through to every @cmp invocation; @cmp returns
 * the usual <0 / 0 / >0 ordering result (see lib/list_sort.c for the
 * implementation).
 */
void list_sort(void *priv, struct list_head *head,
	       int (*cmp)(void *priv, struct list_head *a,
			  struct list_head *b));
#endif
/drivers/include/linux/lockdep.h |
---|
0,0 → 1,537 |
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
struct task_struct;
struct lockdep_map;
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)
#define MAX_LOCKDEP_SUBCLASSES		8UL
/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));
/* One subkey address per possible subclass; only the addresses matter. */
struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
#define LOCKSTAT_POINTS		4
/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;
	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;
	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;
	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;
	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;
	/*
	 * Statistics counter:
	 */
	unsigned long			ops;
	const char			*name;
	int				name_version;
#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};
#ifdef CONFIG_LOCK_STAT
/* min/max/total/count accumulator for wait and hold times. */
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};
/* Cacheline-bounce event categories tracked per class. */
enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,
	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
	unsigned long			contention_point[4];
	unsigned long			contending_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};
struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64 				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest ontop of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also keep do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */
	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);
/* Temporarily disable/re-enable lockdep checking for the current task. */
extern void lockdep_off(void);
extern void lockdep_on(void);
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	/* Classes are keyed purely by key address, so pointer equality suffices. */
	return lock->key == key;
}
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
extern int lock_is_held(struct lockdep_map *lock);
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);
/* Change only the subclass of an already-held lock, keeping name and key. */
static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
/* task_struct initialiser fragment for lockdep fields. */
# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
#else /* !LOCKDEP */
/*
 * With lockdep compiled out every hook collapses to a no-op so callers
 * need no #ifdefs of their own.
 */
static inline void lockdep_off(void)
{
}
static inline void lockdep_on(void)
{
}
# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */
# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };
#define lockdep_depth(tsk)	(0)
#define lockdep_assert_held(l)			do { } while (0)
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
/*
 * Wrap a lock acquisition so contention is recorded: try first, and if
 * that fails report contention, block in lock(), then report acquired.
 */
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
} while (0)
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)
#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)
#endif /* CONFIG_LOCK_STAT */
#ifdef CONFIG_LOCKDEP
/*
 * On lockdep we dont want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))
#else /* CONFIG_LOCKDEP */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags)	\
	lockfl((_lock), (flags))
#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
/* |
* Map the dependency ops to NOP or to real lockdep ops, depending |
* on the per lock-class debug mode: |
*/ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
# define spin_release(l, n, i) lock_release(l, n, i) |
#else |
# define spin_acquire(l, s, t, i) do { } while (0) |
# define spin_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) |
# else |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) |
# endif |
# define rwlock_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwlock_acquire(l, s, t, i) do { } while (0) |
# define rwlock_acquire_read(l, s, t, i) do { } while (0) |
# define rwlock_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# else |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
# define mutex_release(l, n, i) lock_release(l, n, i) |
#else |
# define mutex_acquire(l, s, t, i) do { } while (0) |
# define mutex_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
# else |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
# endif |
# define rwsem_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwsem_acquire(l, s, t, i) do { } while (0) |
# define rwsem_acquire_read(l, s, t, i) do { } while (0) |
# define rwsem_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) |
# else |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) |
# endif |
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) |
#else |
# define lock_map_acquire(l) do { } while (0) |
# define lock_map_release(l) do { } while (0) |
#endif |
#ifdef CONFIG_PROVE_LOCKING |
# define might_lock(lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ |
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
} while (0) |
# define might_lock_read(lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ |
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
} while (0) |
#else |
# define might_lock(lock) do { } while (0) |
# define might_lock_read(lock) do { } while (0) |
#endif |
#endif /* __LINUX_LOCKDEP_H */ |
/drivers/include/linux/module.h |
---|
0,0 → 1,15 |
#ifndef _LINUX_MODULE_H
#define _LINUX_MODULE_H
#include <linux/list.h>
#include <linux/compiler.h>
/* The KolibriOS port has no loadable-module machinery: both macros expand
 * to nothing so unmodified Linux driver sources still compile. */
#define EXPORT_SYMBOL(x)
#define MODULE_FIRMWARE(x)
#endif /* _LINUX_MODULE_H */
/drivers/include/linux/pci.h |
---|
0,0 → 1,566 |
#include <types.h> |
#include <list.h> |
#ifndef __PCI_H__ |
#define __PCI_H__ |
#define PCI_ANY_ID (~0) |
#define PCI_CLASS_NOT_DEFINED 0x0000 |
#define PCI_CLASS_NOT_DEFINED_VGA 0x0001 |
#define PCI_BASE_CLASS_STORAGE 0x01 |
#define PCI_CLASS_STORAGE_SCSI 0x0100 |
#define PCI_CLASS_STORAGE_IDE 0x0101 |
#define PCI_CLASS_STORAGE_FLOPPY 0x0102 |
#define PCI_CLASS_STORAGE_IPI 0x0103 |
#define PCI_CLASS_STORAGE_RAID 0x0104 |
#define PCI_CLASS_STORAGE_SATA 0x0106 |
#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 |
#define PCI_CLASS_STORAGE_SAS 0x0107 |
#define PCI_CLASS_STORAGE_OTHER 0x0180 |
#define PCI_BASE_CLASS_NETWORK 0x02 |
#define PCI_CLASS_NETWORK_ETHERNET 0x0200 |
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 |
#define PCI_CLASS_NETWORK_FDDI 0x0202 |
#define PCI_CLASS_NETWORK_ATM 0x0203 |
#define PCI_CLASS_NETWORK_OTHER 0x0280 |
#define PCI_BASE_CLASS_DISPLAY 0x03 |
#define PCI_CLASS_DISPLAY_VGA 0x0300 |
#define PCI_CLASS_DISPLAY_XGA 0x0301 |
#define PCI_CLASS_DISPLAY_3D 0x0302 |
#define PCI_CLASS_DISPLAY_OTHER 0x0380 |
#define PCI_BASE_CLASS_MULTIMEDIA 0x04 |
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 |
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 |
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 |
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 |
#define PCI_BASE_CLASS_MEMORY 0x05 |
#define PCI_CLASS_MEMORY_RAM 0x0500 |
#define PCI_CLASS_MEMORY_FLASH 0x0501 |
#define PCI_CLASS_MEMORY_OTHER 0x0580 |
#define PCI_BASE_CLASS_BRIDGE 0x06 |
#define PCI_CLASS_BRIDGE_HOST 0x0600 |
#define PCI_CLASS_BRIDGE_ISA 0x0601 |
#define PCI_CLASS_BRIDGE_EISA 0x0602 |
#define PCI_CLASS_BRIDGE_MC 0x0603 |
#define PCI_CLASS_BRIDGE_PCI 0x0604 |
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605 |
#define PCI_CLASS_BRIDGE_NUBUS 0x0606 |
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607 |
#define PCI_CLASS_BRIDGE_RACEWAY 0x0608 |
#define PCI_CLASS_BRIDGE_OTHER 0x0680 |
#define PCI_BASE_CLASS_COMMUNICATION 0x07 |
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 |
#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 |
#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 |
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703 |
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780 |
#define PCI_BASE_CLASS_SYSTEM 0x08 |
#define PCI_CLASS_SYSTEM_PIC 0x0800 |
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 |
#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 |
#define PCI_CLASS_SYSTEM_DMA 0x0801 |
#define PCI_CLASS_SYSTEM_TIMER 0x0802 |
#define PCI_CLASS_SYSTEM_RTC 0x0803 |
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 |
#define PCI_CLASS_SYSTEM_SDHCI 0x0805 |
#define PCI_CLASS_SYSTEM_OTHER 0x0880 |
#define PCI_BASE_CLASS_INPUT 0x09 |
#define PCI_CLASS_INPUT_KEYBOARD 0x0900 |
#define PCI_CLASS_INPUT_PEN 0x0901 |
#define PCI_CLASS_INPUT_MOUSE 0x0902 |
#define PCI_CLASS_INPUT_SCANNER 0x0903 |
#define PCI_CLASS_INPUT_GAMEPORT 0x0904 |
#define PCI_CLASS_INPUT_OTHER 0x0980 |
#define PCI_BASE_CLASS_DOCKING 0x0a |
#define PCI_CLASS_DOCKING_GENERIC 0x0a00 |
#define PCI_CLASS_DOCKING_OTHER 0x0a80 |
#define PCI_BASE_CLASS_PROCESSOR 0x0b |
#define PCI_CLASS_PROCESSOR_386 0x0b00 |
#define PCI_CLASS_PROCESSOR_486 0x0b01 |
#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 |
#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 |
#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 |
#define PCI_CLASS_PROCESSOR_MIPS 0x0b30 |
#define PCI_CLASS_PROCESSOR_CO 0x0b40 |
#define PCI_BASE_CLASS_SERIAL 0x0c |
#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 |
#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 |
#define PCI_CLASS_SERIAL_ACCESS 0x0c01 |
#define PCI_CLASS_SERIAL_SSA 0x0c02 |
#define PCI_CLASS_SERIAL_USB 0x0c03 |
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 |
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 |
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 |
#define PCI_CLASS_SERIAL_FIBER 0x0c04 |
#define PCI_CLASS_SERIAL_SMBUS 0x0c05 |
#define PCI_BASE_CLASS_WIRELESS 0x0d |
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 |
#define PCI_CLASS_WIRELESS_WHCI 0x0d1010 |
#define PCI_BASE_CLASS_INTELLIGENT 0x0e |
#define PCI_CLASS_INTELLIGENT_I2O 0x0e00 |
#define PCI_BASE_CLASS_SATELLITE 0x0f |
#define PCI_CLASS_SATELLITE_TV 0x0f00 |
#define PCI_CLASS_SATELLITE_AUDIO 0x0f01 |
#define PCI_CLASS_SATELLITE_VOICE 0x0f03 |
#define PCI_CLASS_SATELLITE_DATA 0x0f04 |
#define PCI_BASE_CLASS_CRYPT 0x10 |
#define PCI_CLASS_CRYPT_NETWORK 0x1000 |
#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 |
#define PCI_CLASS_CRYPT_OTHER 0x1080 |
#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 |
#define PCI_CLASS_SP_DPIO 0x1100 |
#define PCI_CLASS_SP_OTHER 0x1180 |
#define PCI_CLASS_OTHERS 0xff |
/* |
* Under PCI, each device has 256 bytes of configuration address space, |
* of which the first 64 bytes are standardized as follows: |
*/ |
#define PCI_VENDOR_ID 0x000 /* 16 bits */ |
#define PCI_DEVICE_ID 0x002 /* 16 bits */ |
#define PCI_COMMAND 0x004 /* 16 bits */ |
#define PCI_COMMAND_IO 0x001 /* Enable response in I/O space */ |
#define PCI_COMMAND_MEMORY 0x002 /* Enable response in Memory space */ |
#define PCI_COMMAND_MASTER 0x004 /* Enable bus mastering */ |
#define PCI_COMMAND_SPECIAL 0x008 /* Enable response to special cycles */ |
#define PCI_COMMAND_INVALIDATE 0x010 /* Use memory write and invalidate */ |
#define PCI_COMMAND_VGA_PALETTE 0x020 /* Enable palette snooping */ |
#define PCI_COMMAND_PARITY 0x040 /* Enable parity checking */ |
#define PCI_COMMAND_WAIT 0x080 /* Enable address/data stepping */ |
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */ |
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */ |
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ |
#define PCI_STATUS 0x006 /* 16 bits */ |
#define PCI_STATUS_CAP_LIST 0x010 /* Support Capability List */ |
#define PCI_STATUS_66MHZ 0x020 /* Support 66 Mhz PCI 2.1 bus */ |
#define PCI_STATUS_UDF 0x040 /* Support User Definable Features [obsolete] */ |
#define PCI_STATUS_FAST_BACK 0x080 /* Accept fast-back to back */ |
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */ |
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */ |
#define PCI_STATUS_DEVSEL_FAST 0x000 |
#define PCI_STATUS_DEVSEL_MEDIUM 0x200 |
#define PCI_STATUS_DEVSEL_SLOW 0x400 |
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */ |
#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */ |
#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */ |
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */ |
#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */ |
#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ |
#define PCI_REVISION_ID 0x08 /* Revision ID */ |
#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */ |
#define PCI_CLASS_DEVICE 0x0a /* Device class */ |
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ |
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */ |
#define PCI_HEADER_TYPE 0x0e /* 8 bits */ |
#define PCI_HEADER_TYPE_NORMAL 0 |
#define PCI_HEADER_TYPE_BRIDGE 1 |
#define PCI_HEADER_TYPE_CARDBUS 2 |
#define PCI_BIST 0x0f /* 8 bits */ |
#define PCI_BIST_CODE_MASK 0x0f /* Return result */ |
#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */ |
#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */ |
/* |
* Base addresses specify locations in memory or I/O space. |
* Decoded size can be determined by writing a value of |
* 0xffffffff to the register, and reading it back. Only |
* 1 bits are decoded. |
*/ |
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ |
#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */ |
#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */ |
#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */ |
#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */ |
#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */ |
#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ |
#define PCI_BASE_ADDRESS_SPACE_IO 0x01 |
#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 |
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 |
#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ |
#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */ |
#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ |
#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */ |
#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) |
#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) |
/* bit 1 is reserved if address_space = 1 */ |
#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */ |
/* Header type 0 (normal devices) */ |
#define PCI_CARDBUS_CIS 0x28 |
#define PCI_SUBSYSTEM_VENDOR_ID 0x2c |
#define PCI_SUBSYSTEM_ID 0x2e |
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ |
#define PCI_ROM_ADDRESS_ENABLE 0x01 |
#define PCI_ROM_ADDRESS_MASK (~0x7ffUL) |
#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ |
#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ |
#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40 |
#define PCI_CB_SUBSYSTEM_ID 0x42 |
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ |
#define PCI_CB_CAPABILITY_LIST 0x14 |
/* Capability lists */ |
#define PCI_CAP_LIST_ID 0 /* Capability ID */ |
#define PCI_CAP_ID_PM 0x01 /* Power Management */ |
#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */ |
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */ |
#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */ |
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ |
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */ |
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */ |
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */ |
#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */ |
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ |
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ |
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ |
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ |
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ |
#define PCI_CAP_SIZEOF 4 |
/* AGP registers */ |
#define PCI_AGP_VERSION 2 /* BCD version number */ |
#define PCI_AGP_RFU 3 /* Rest of capability flags */ |
#define PCI_AGP_STATUS 4 /* Status register */ |
#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */ |
#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */ |
#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */ |
#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */ |
#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */ |
#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */ |
#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */ |
#define PCI_AGP_COMMAND 8 /* Control register */ |
#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */ |
#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */ |
#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */ |
#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */ |
#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */ |
#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */ |
#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */ |
#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */ |
#define PCI_AGP_SIZEOF 12 |
#define PCI_MAP_REG_START 0x10 |
#define PCI_MAP_REG_END 0x28 |
#define PCI_MAP_ROM_REG 0x30 |
#define PCI_MAP_MEMORY 0x00000000 |
#define PCI_MAP_IO 0x00000001 |
#define PCI_MAP_MEMORY_TYPE 0x00000007 |
#define PCI_MAP_IO_TYPE 0x00000003 |
#define PCI_MAP_MEMORY_TYPE_32BIT 0x00000000 |
#define PCI_MAP_MEMORY_TYPE_32BIT_1M 0x00000002 |
#define PCI_MAP_MEMORY_TYPE_64BIT 0x00000004 |
#define PCI_MAP_MEMORY_TYPE_MASK 0x00000006 |
#define PCI_MAP_MEMORY_CACHABLE 0x00000008 |
#define PCI_MAP_MEMORY_ATTR_MASK 0x0000000e |
#define PCI_MAP_MEMORY_ADDRESS_MASK 0xfffffff0 |
#define PCI_MAP_IO_ATTR_MASK 0x00000003 |
#define PCI_MAP_IS_IO(b) ((b) & PCI_MAP_IO) |
#define PCI_MAP_IS_MEM(b) (!PCI_MAP_IS_IO(b)) |
#define PCI_MAP_IS64BITMEM(b) \ |
(((b) & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_64BIT) |
#define PCIGETMEMORY(b) ((b) & PCI_MAP_MEMORY_ADDRESS_MASK) |
#define PCIGETMEMORY64HIGH(b) (*((CARD32*)&b + 1)) |
#define PCIGETMEMORY64(b) \ |
(PCIGETMEMORY(b) | ((CARD64)PCIGETMEMORY64HIGH(b) << 32)) |
#define PCI_MAP_IO_ADDRESS_MASK 0xfffffffc |
#define PCIGETIO(b) ((b) & PCI_MAP_IO_ADDRESS_MASK) |
#define PCI_MAP_ROM_DECODE_ENABLE 0x00000001 |
#define PCI_MAP_ROM_ADDRESS_MASK 0xfffff800 |
#define PCIGETROM(b) ((b) & PCI_MAP_ROM_ADDRESS_MASK) |
/*
 * PCI "tag" helpers: a tag packs (domain:bus, device, function) into one
 * 32-bit value -- bus/domain in bits 16 and up, device in bits 11..15,
 * function in bits 8..10.
 */
#ifndef PCI_DOM_MASK
# define PCI_DOM_MASK 0x0ffu
#endif
#define PCI_DOMBUS_MASK (((PCI_DOM_MASK) << 8) | 0x0ffu)
#define PCI_MAKE_TAG(b,d,f) ((((b) & (PCI_DOMBUS_MASK)) << 16) | \
(((d) & 0x00001fu) << 11) | \
(((f) & 0x000007u) << 8))
#define PCI_BUS_FROM_TAG(tag) (((tag) >> 16) & (PCI_DOMBUS_MASK))
#define PCI_DEV_FROM_TAG(tag) (((tag) & 0x0000f800u) >> 11)
#define PCI_FUNC_FROM_TAG(tag) (((tag) & 0x00000700u) >> 8)
#define PCI_DFN_FROM_TAG(tag) (((tag) & 0x0000ff00u) >> 8)
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
typedef unsigned int PCITAG;
/*
 * Build a PCITAG from bus/device/function numbers.
 *
 * Was 'extern inline': under gcc's default gnu89 semantics that emits no
 * out-of-line definition, so any call the compiler chooses not to inline
 * (e.g. at -O0) becomes an undefined reference; under C99 inline
 * semantics it would instead emit an external definition in every
 * including translation unit, causing duplicate-symbol link errors.
 * 'static inline' is the header-safe form and matches the other inline
 * helpers in this header set (mdelay/udelay in linux/sched.h).
 */
static inline PCITAG
pciTag(int busnum, int devnum, int funcnum)
{
return(PCI_MAKE_TAG(busnum,devnum,funcnum));
}
/*
 * Trimmed copy of the Linux 'struct resource': one I/O or memory range
 * owned by a device.  Fields this port does not use are commented out.
 */
struct resource
{
resource_size_t start; /* first address of the range (inclusive) */
resource_size_t end; /* last address of the range (inclusive) */
// const char *name;
unsigned long flags; /* IORESOURCE_* bits (defined below) */
// struct resource *parent, *sibling, *child;
};
/* |
* IO resources have these defined flags. |
*/ |
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ |
#define IORESOURCE_IO 0x00000100 /* Resource type */ |
#define IORESOURCE_MEM 0x00000200 |
#define IORESOURCE_IRQ 0x00000400 |
#define IORESOURCE_DMA 0x00000800 |
#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ |
#define IORESOURCE_READONLY 0x00002000 |
#define IORESOURCE_CACHEABLE 0x00004000 |
#define IORESOURCE_RANGELENGTH 0x00008000 |
#define IORESOURCE_SHADOWABLE 0x00010000 |
#define IORESOURCE_BUS_HAS_VGA 0x00080000 |
#define IORESOURCE_DISABLED 0x10000000 |
#define IORESOURCE_UNSET 0x20000000 |
#define IORESOURCE_AUTO 0x40000000 |
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ |
/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */ |
#define IORESOURCE_IRQ_HIGHEDGE (1<<0) |
#define IORESOURCE_IRQ_LOWEDGE (1<<1) |
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2) |
#define IORESOURCE_IRQ_LOWLEVEL (1<<3) |
#define IORESOURCE_IRQ_SHAREABLE (1<<4) |
/* ISA PnP DMA specific bits (IORESOURCE_BITS) */ |
#define IORESOURCE_DMA_TYPE_MASK (3<<0) |
#define IORESOURCE_DMA_8BIT (0<<0) |
#define IORESOURCE_DMA_8AND16BIT (1<<0) |
#define IORESOURCE_DMA_16BIT (2<<0) |
#define IORESOURCE_DMA_MASTER (1<<2) |
#define IORESOURCE_DMA_BYTE (1<<3) |
#define IORESOURCE_DMA_WORD (1<<4) |
#define IORESOURCE_DMA_SPEED_MASK (3<<6) |
#define IORESOURCE_DMA_COMPATIBLE (0<<6) |
#define IORESOURCE_DMA_TYPEA (1<<6) |
#define IORESOURCE_DMA_TYPEB (2<<6) |
#define IORESOURCE_DMA_TYPEF (3<<6) |
/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */ |
#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ |
#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ |
#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ |
#define IORESOURCE_MEM_TYPE_MASK (3<<3) |
#define IORESOURCE_MEM_8BIT (0<<3) |
#define IORESOURCE_MEM_16BIT (1<<3) |
#define IORESOURCE_MEM_8AND16BIT (2<<3) |
#define IORESOURCE_MEM_32BIT (3<<3) |
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ |
#define IORESOURCE_MEM_EXPANSIONROM (1<<6) |
/* PCI ROM control bits (IORESOURCE_BITS) */ |
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ |
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ |
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ |
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ |
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
/* |
* For PCI devices, the region numbers are assigned this way: |
* |
* 0-5 standard PCI regions |
* 6 expansion ROM |
* 7-10 bridges: address space assigned to buses behind the bridge |
*/ |
#define PCI_ROM_RESOURCE 6 |
#define PCI_BRIDGE_RESOURCES 7 |
#define PCI_NUM_RESOURCES 11 |
#ifndef PCI_BUS_NUM_RESOURCES |
#define PCI_BUS_NUM_RESOURCES 8 |
#endif |
#define DEVICE_COUNT_RESOURCE 12 |
/* |
* The pci_dev structure is used to describe PCI devices. |
*/ |
/*
 * Trimmed copy of the Linux 'struct pci_dev' describing one PCI device.
 * Fields the KolibriOS port does not need are commented out; the bus is
 * kept as a plain number instead of a 'struct pci_bus' pointer.
 */
struct pci_dev {
// struct list_head bus_list; /* node in per-bus list */
// struct pci_bus *bus; /* bus this device is on */
// struct pci_bus *subordinate; /* bus this device bridges to */
// void *sysdata; /* hook for sys-specific extension */
// struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
// struct pci_slot *slot; /* Physical slot this device is in */
u32_t bus; /* bus number (replaces the pci_bus pointer above) */
u32_t devfn; /* encoded device & function index */
u16_t vendor; /* PCI vendor ID */
u16_t device; /* PCI device ID */
u16_t subsystem_vendor;
u16_t subsystem_device;
u32_t class; /* 3 bytes: (base,sub,prog-if) */
uint8_t revision; /* PCI revision, low byte of class word */
uint8_t hdr_type; /* PCI header type (`multi' flag masked out) */
uint8_t pcie_type; /* PCI-E device/port type */
uint8_t rom_base_reg; /* which config register controls the ROM */
uint8_t pin; /* which interrupt pin this device uses */
// struct pci_driver *driver; /* which driver has allocated this device */
uint64_t dma_mask; /* Mask of the bits of bus address this
device implements. Normally this is
0xffffffff. You only need to change
this if your device has broken DMA
or supports 64-bit transfers. */
// struct device_dma_parameters dma_parms;
// pci_power_t current_state; /* Current operating state. In ACPI-speak,
// this is D0-D3, D0 being fully functional,
// and D3 being off. */
// int pm_cap; /* PM capability offset in the
// configuration space */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
// pci_channel_state_t error_state; /* current connectivity state */
// struct device dev; /* Generic device interface */
// int cfg_size; /* Size of configuration space */
/*
* Instead of touching interrupt line and base address registers
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1;
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
// pci_dev_flags_t dev_flags;
// atomic_t enable_cnt; /* pci_enable_device has been called */
// u32 saved_config_space[16]; /* config space saved at suspend time */
// struct hlist_head saved_cap_space;
// struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
// int rom_attr_enabled; /* has display of the rom attribute been enabled? */
// struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
// struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
};
/* Accessors for the cached BAR ranges held in pci_dev::resource[]. */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
/* Length of a BAR: 0 when the resource is unset (start == end == 0),
 * otherwise end - start + 1 since both bounds are inclusive. */
#define pci_resource_len(dev,bar) \
((pci_resource_start((dev), (bar)) == 0 && \
pci_resource_end((dev), (bar)) == \
pci_resource_start((dev), (bar))) ? 0 : \
\
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
/* Driver match entry, mirroring the Linux struct pci_device_id layout. */
struct pci_device_id
{
u16_t vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
u16_t subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
u32_t class, class_mask; /* (class,subclass,prog-if) triplet */
u32_t driver_data; /* Data private to the driver */
};
/* pci_dev plus a list node so enumerated devices can be chained. */
typedef struct
{
struct list_head link;
struct pci_dev pci_dev;
}pci_dev_t;
/* Enumeration helpers -- declared here, defined elsewhere in the DDK. */
int enum_pci_devices(void);
struct pci_device_id*
find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist);
/* Mask with the low n bits set; n == 64 is special-cased because
 * 1ULL << 64 would be undefined behaviour. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
/* NOTE(review): device name is hard-coded to "radeon" in this port. */
#define pci_name(x) "radeon"
#endif //__PCI_H__
/drivers/include/linux/posix_types.h |
---|
0,0 → 1,49 |
#ifndef _LINUX_POSIX_TYPES_H |
#define _LINUX_POSIX_TYPES_H |
#include <linux/stddef.h> |
/* |
* This allows for 1024 file descriptors: if NR_OPEN is ever grown |
* beyond that you'll have to change this too. But 1024 fd's seem to be |
* enough even for such "real" unices like OSF/1, so hopefully this is |
* one limit that doesn't have to be changed [again]. |
* |
* Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in |
* <sys/time.h> (and thus <linux/time.h>) - but this is a more logical |
* place for them. Solved by having dummy defines in <sys/time.h>. |
*/ |
/* |
* Those macros may have been defined in <gnu/types.h>. But we always |
* use the ones here. |
*/ |
#undef __NFDBITS |
#define __NFDBITS (8 * sizeof(unsigned long)) |
#undef __FD_SETSIZE |
#define __FD_SETSIZE 1024 |
#undef __FDSET_LONGS |
#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS) |
#undef __FDELT |
#define __FDELT(d) ((d) / __NFDBITS) |
#undef __FDMASK |
#define __FDMASK(d) (1UL << ((d) % __NFDBITS)) |
/* fd_set-style bitmap: one bit per descriptor, __FD_SETSIZE bits in
 * __FDSET_LONGS unsigned longs (see the macros above). */
typedef struct {
unsigned long fds_bits [__FDSET_LONGS];
} __kernel_fd_set;
/* Type of a signal handler. */
typedef void (*__kernel_sighandler_t)(int);
/* Type of a SYSV IPC key. */
typedef int __kernel_key_t;
typedef int __kernel_mqd_t;
#include <asm/posix_types.h> |
#endif /* _LINUX_POSIX_TYPES_H */ |
/drivers/include/linux/sched.h |
---|
0,0 → 1,29 |
/* stub */ |
/*
 * Millisecond-order delay stub.
 *
 * The requested 'time' is divided by 10 before being handed to the
 * kernel's Delay export, so Delay presumably counts in 10 ms ticks --
 * TODO confirm against the kernel side.  A zero result is rounded up to
 * one tick so the caller always waits at least once.
 */
static inline void mdelay(unsigned long time)
{
time /= 10;
if(!time) time = 1;
/* Indirect call through the Delay import-table entry; the tick count
* is passed in EBX (constraint "b"). */
__asm__ __volatile__ (
"call *__imp__Delay"
::"b" (time));
/* Empty asm whose only job is to tell GCC that EBX was clobbered: a
* register used as an input operand may not also be listed in the
* clobbers of the same asm statement.
* NOTE(review): EAX/ECX/EDX are not declared clobbered although the
* call may modify them -- confirm Delay preserves those registers. */
__asm__ __volatile__ (
"":::"ebx");
};
/*
 * Microsecond-order busy-wait stub.
 *
 * Spins executing CPUID roughly 500 times per requested microsecond.
 * The 500x factor is an assumed calibration constant -- TODO confirm;
 * the real delay scales with CPU speed.  A zero request is bumped to
 * one iteration block so the loop always runs.
 */
static inline void udelay(unsigned long delay)
{
if(!delay) delay++;
delay*= 500;
while(delay--)
{
/* CPUID with EAX=0, used purely for its execution latency.  It
* overwrites EAX..EDX, hence the full clobber list. */
__asm__ __volatile__(
"xorl %%eax, %%eax \n\t"
"cpuid"
:::"eax","ebx","ecx","edx" );
}
}
/drivers/include/linux/seq_file.h |
---|
0,0 → 1,3 |
/* stub */ |
#include <errno.h> |
/drivers/include/linux/spinlock.h |
---|
0,0 → 1,347 |
#ifndef __LINUX_SPINLOCK_H |
#define __LINUX_SPINLOCK_H |
/* |
* include/linux/spinlock.h - generic spinlock/rwlock declarations |
* |
* here's the role of the various spinlock/rwlock related include files: |
* |
* on SMP builds: |
* |
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the |
* initializers |
* |
* linux/spinlock_types.h: |
* defines the generic type and initializers |
* |
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel |
* implementations, mostly inline assembly code |
* |
* (also included on UP-debug builds:) |
* |
* linux/spinlock_api_smp.h: |
* contains the prototypes for the _spin_*() APIs. |
* |
* linux/spinlock.h: builds the final spin_*() APIs. |
* |
* on UP builds: |
* |
* linux/spinlock_type_up.h: |
* contains the generic, simplified UP spinlock type. |
* (which is an empty structure on non-debug builds) |
* |
* linux/spinlock_types.h: |
* defines the generic type and initializers |
* |
* linux/spinlock_up.h: |
* contains the __raw_spin_*()/etc. version of UP |
* builds. (which are NOPs on non-debug, non-preempt |
* builds) |
* |
* (included on UP-non-debug builds:) |
* |
* linux/spinlock_api_up.h: |
* builds the _spin_*() APIs. |
* |
* linux/spinlock.h: builds the final spin_*() APIs. |
*/ |
#include <linux/typecheck.h> |
//#include <linux/preempt.h> |
//#include <linux/linkage.h> |
#include <linux/compiler.h> |
//#include <linux/thread_info.h> |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
//#include <linux/bottom_half.h> |
//#include <asm/system.h> |
/* |
* Must define these before including other files, inline functions need them |
*/ |
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME |
#define LOCK_SECTION_START(extra) \ |
".subsection 1\n\t" \ |
extra \ |
".ifndef " LOCK_SECTION_NAME "\n\t" \ |
LOCK_SECTION_NAME ":\n\t" \ |
".endif\n" |
#define LOCK_SECTION_END \ |
".previous\n\t" |
#define __lockfunc __attribute__((section(".spinlock.text"))) |
/* |
* Pull the raw_spinlock_t and raw_rwlock_t definitions: |
*/ |
#include <linux/spinlock_types.h> |
/* |
* Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): |
*/ |
#ifdef CONFIG_SMP |
# include <asm/spinlock.h> |
#else |
# include <linux/spinlock_up.h> |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void __spin_lock_init(spinlock_t *lock, const char *name, |
struct lock_class_key *key); |
# define spin_lock_init(lock) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__spin_lock_init((lock), #lock, &__key); \ |
} while (0) |
#else |
# define spin_lock_init(lock) \ |
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void __rwlock_init(rwlock_t *lock, const char *name, |
struct lock_class_key *key); |
# define rwlock_init(lock) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__rwlock_init((lock), #lock, &__key); \ |
} while (0) |
#else |
# define rwlock_init(lock) \ |
do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) |
#endif |
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) |
#ifdef CONFIG_GENERIC_LOCKBREAK |
#define spin_is_contended(lock) ((lock)->break_lock) |
#else |
#ifdef __raw_spin_is_contended |
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) |
#else |
#define spin_is_contended(lock) (((void)(lock), 0)) |
#endif /*__raw_spin_is_contended*/ |
#endif |
/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
/* Generic fallback: issue an explicit full barrier after lock acquire
 * on architectures that do not provide their own implementation. */
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
/*
 * _raw_*() layer: with CONFIG_DEBUG_SPINLOCK these are out-of-line,
 * checked implementations; otherwise they map straight onto the
 * architecture __raw_*() primitives operating on ->raw_lock.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern void _raw_read_lock(rwlock_t *lock);
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
 extern void _raw_write_lock(rwlock_t *lock);
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_lock_flags(lock, flags) \
		__raw_read_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_lock_flags(lock, flags) \
		__raw_write_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
#endif
#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
 * methods are defined as nops in the case they are not required.
 */
#define spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))
#define spin_lock(lock)			_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
# define spin_lock_nest_lock(lock, nest_lock)				\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif
#define write_lock(lock)		_write_lock(lock)
#define read_lock(lock)			_read_lock(lock)
/*
 * *_irqsave(): on SMP/debug the flags are RETURNED by the _-prefixed
 * helper; on UP-nondebug they are filled in by reference.  typecheck()
 * catches callers passing a pointer instead of an unsigned long.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _spin_lock_irqsave(lock);	\
	} while (0)
#define read_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _read_lock_irqsave(lock);	\
	} while (0)
#define write_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _write_lock_irqsave(lock);	\
	} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass)			\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass)			\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _spin_lock_irqsave(lock);			\
	} while (0)
#endif
#else
#define spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		_spin_lock_irqsave(lock, flags);	\
	} while (0)
#define read_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		_read_lock_irqsave(lock, flags);	\
	} while (0)
#define write_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		_write_lock_irqsave(lock, flags);	\
	} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(lock, flags)
#endif
#define spin_lock_irq(lock)		_spin_lock_irq(lock)
#define spin_lock_bh(lock)		_spin_lock_bh(lock)
#define read_lock_irq(lock)		_read_lock_irq(lock)
#define read_lock_bh(lock)		_read_lock_bh(lock)
#define write_lock_irq(lock)		_write_lock_irq(lock)
#define write_lock_bh(lock)		_write_lock_bh(lock)
#define spin_unlock(lock)		_spin_unlock(lock)
#define read_unlock(lock)		_read_unlock(lock)
#define write_unlock(lock)		_write_unlock(lock)
#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
#define read_unlock_irq(lock)		_read_unlock_irq(lock)
#define write_unlock_irq(lock)		_write_unlock_irq(lock)
#define spin_unlock_irqrestore(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
#define read_unlock_irqrestore(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_read_unlock_irqrestore(lock, flags);	\
	} while (0)
#define read_unlock_bh(lock)		_read_unlock_bh(lock)
#define write_unlock_irqrestore(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_write_unlock_irqrestore(lock, flags);	\
	} while (0)
#define write_unlock_bh(lock)		_write_unlock_bh(lock)
#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
/*
 * trylock-with-irq variants: disable interrupts first, then re-enable
 * (or restore) them again if the trylock failed, so a failed attempt
 * leaves the irq state untouched.
 */
#define spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})
#define spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
#define write_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	write_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define spin_can_lock(lock)	(!spin_is_locked(lock))
/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
#endif /* __LINUX_SPINLOCK_H */
/drivers/include/linux/spinlock_api_up.h |
---|
0,0 → 1,81 |
#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
/*
 * include/linux/spinlock_api_up.h
 *
 * spinlock API implementation on UP-nondebug (inlined implementation)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
#define in_lock_functions(ADDR)		0
#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
/*
 * In the UP-nondebug case there's no real locking going on, so the
 * only thing we have to do is to keep the preempt counts and irq
 * flags straight, to suppress compiler warnings of unused lock
 * variables, and to add the proper checker annotations:
 */
#define __LOCK(lock) \
  do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
#define __LOCK_BH(lock) \
  do { local_bh_disable(); __LOCK(lock); } while (0)
#define __LOCK_IRQ(lock) \
  do { local_irq_disable(); __LOCK(lock); } while (0)
#define __LOCK_IRQSAVE(lock, flags) \
  do { local_irq_save(flags); __LOCK(lock); } while (0)
#define __UNLOCK(lock) \
  do { preempt_enable(); __release(lock); (void)(lock); } while (0)
/* Note: preempt_enable_no_resched() — local_bh_enable() reschedules. */
#define __UNLOCK_BH(lock) \
  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
  do { local_irq_enable(); __UNLOCK(lock); } while (0)
#define __UNLOCK_IRQRESTORE(lock, flags) \
  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
#define _spin_lock(lock)			__LOCK(lock)
#define _spin_lock_nested(lock, subclass)	__LOCK(lock)
#define _read_lock(lock)			__LOCK(lock)
#define _write_lock(lock)			__LOCK(lock)
#define _spin_lock_bh(lock)			__LOCK_BH(lock)
#define _read_lock_bh(lock)			__LOCK_BH(lock)
#define _write_lock_bh(lock)			__LOCK_BH(lock)
#define _spin_lock_irq(lock)			__LOCK_IRQ(lock)
#define _read_lock_irq(lock)			__LOCK_IRQ(lock)
#define _write_lock_irq(lock)			__LOCK_IRQ(lock)
#define _spin_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
#define _read_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
#define _write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
/* trylocks always succeed on UP: there is nobody to contend with. */
#define _spin_trylock(lock)			({ __LOCK(lock); 1; })
#define _read_trylock(lock)			({ __LOCK(lock); 1; })
#define _write_trylock(lock)			({ __LOCK(lock); 1; })
#define _spin_trylock_bh(lock)			({ __LOCK_BH(lock); 1; })
#define _spin_unlock(lock)			__UNLOCK(lock)
#define _read_unlock(lock)			__UNLOCK(lock)
#define _write_unlock(lock)			__UNLOCK(lock)
#define _spin_unlock_bh(lock)			__UNLOCK_BH(lock)
#define _write_unlock_bh(lock)			__UNLOCK_BH(lock)
#define _read_unlock_bh(lock)			__UNLOCK_BH(lock)
#define _spin_unlock_irq(lock)			__UNLOCK_IRQ(lock)
#define _read_unlock_irq(lock)			__UNLOCK_IRQ(lock)
#define _write_unlock_irq(lock)			__UNLOCK_IRQ(lock)
#define _spin_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
#define _read_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
#define _write_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
/drivers/include/linux/spinlock_types.h |
---|
0,0 → 1,100 |
#ifndef __LINUX_SPINLOCK_TYPES_H
#define __LINUX_SPINLOCK_TYPES_H
/*
 * include/linux/spinlock_types.h - generic spinlock type definitions
 *                                  and initializers
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
# include <linux/spinlock_types_up.h>
#endif
#include <linux/lockdep.h>
/*
 * Generic spinlock: wraps the arch raw lock plus optional debug /
 * lockdep bookkeeping selected by config options.
 */
typedef struct {
	raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;	/* set while waiters spin (lock-break hint) */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} spinlock_t;
#define SPINLOCK_MAGIC		0xdead4ead
/* Reader/writer lock: same layout scheme as spinlock_t. */
typedef struct {
	raw_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} rwlock_t;
#define RWLOCK_MAGIC		0xdeaf1eed
/* Debug "owner" sentinel meaning "no owner recorded yet". */
#define SPINLOCK_OWNER_INIT	((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
#else
# define SPIN_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
/*
 * Compound-literal initialisers for an unlocked lock; the debug variant
 * additionally seeds the magic/owner fields.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname)				\
	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
				.magic = SPINLOCK_MAGIC,		\
				.owner = SPINLOCK_OWNER_INIT,		\
				.owner_cpu = -1,			\
				SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname)				\
	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
				.magic = RWLOCK_MAGIC,			\
				.owner = SPINLOCK_OWNER_INIT,		\
				.owner_cpu = -1,			\
				RW_DEP_MAP_INIT(lockname) }
#else
# define __SPIN_LOCK_UNLOCKED(lockname) \
	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
				SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
				RW_DEP_MAP_INIT(lockname) }
#endif
/*
 * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
 * are hence deprecated.
 * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
 * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
 */
#define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_SPINLOCK_TYPES_H */
/drivers/include/linux/spinlock_types_up.h |
---|
0,0 → 1,37 |
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
/*
 * include/linux/spinlock_types_up.h - spinlock type definitions for UP
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
#ifdef CONFIG_DEBUG_SPINLOCK
/* Debug UP lock keeps a real lock word; 1 == unlocked (see spinlock_up.h). */
typedef struct {
	volatile unsigned int slock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
#else
/* Non-debug UP lock carries no state at all — it is a zero-size type. */
typedef struct { } raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
	/* no debug version on UP */
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
/drivers/include/linux/spinlock_up.h |
---|
0,0 → 1,76 |
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
#define __raw_spin_is_locked(x)		((x)->slock == 0)
/* Acquire: simply write the "locked" value — no atomicity needed on UP. */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	lock->slock = 0;
}
static inline void
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	/* NOTE(review): saves irq state into the local @flags copy only —
	 * matches the historical upstream UP-debug implementation. */
	local_irq_save(flags);
	lock->slock = 0;
}
/* Succeeds iff the lock word still held the "unlocked" value (> 0). */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval = lock->slock;
	lock->slock = 0;
	return oldval > 0;
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	lock->slock = 1;
}
/*
 * Read-write spinlocks. No debug version.
 */
#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
#define __raw_spin_is_locked(lock)	((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
# define __raw_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
#define __raw_read_can_lock(lock)	(((void)(lock), 1))
#define __raw_write_can_lock(lock)	(((void)(lock), 1))
#define __raw_spin_unlock_wait(lock) \
		do { cpu_relax(); } while (__raw_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
/drivers/include/linux/stddef.h |
---|
0,0 → 1,28 |
#ifndef _LINUX_STDDEF_H
#define _LINUX_STDDEF_H
#include <linux/compiler.h>
#undef NULL
#if defined(__cplusplus)
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#ifdef __KERNEL__
/* Kernel-internal boolean constants (no <stdbool.h> in the kernel). */
enum {
	false	= 0,
	true	= 1
};
#undef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
#else
/* Classic fallback: member address within a struct "at" address 0. */
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#endif /* __KERNEL__ */
#endif
/drivers/include/linux/string.h |
---|
0,0 → 1,135 |
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
/* We don't want strings.h stuff being used by user stuff by accident */
#ifndef __KERNEL__
#include <string.h>
#else
#include <linux/compiler.h>	/* for inline */
#include <linux/types.h>	/* for size_t */
#include <linux/stddef.h>	/* for NULL */
#include <stdarg.h>
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
/*
 * Include machine specific inline routines
 */
#include <asm/string.h>
/*
 * Each prototype below is only emitted when the architecture does not
 * provide its own (__HAVE_ARCH_* guards set by <asm/string.h>).
 */
#ifndef __HAVE_ARCH_STRCPY
extern char * strcpy(char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
#ifndef __HAVE_ARCH_STRNCAT
extern char * strncat(char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRLCAT
extern size_t strlcat(char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRCMP
extern int strcmp(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRNICMP
extern int strnicmp(const char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
#ifndef __HAVE_ARCH_STRNCASECMP
extern int strncasecmp(const char *s1, const char *s2, size_t n);
#endif
#ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int);
#endif
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
extern char * __must_check strstrip(char *);
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
#endif
#ifndef __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRPBRK
extern char * strpbrk(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRSEP
extern char * strsep(char **,const char *);
#endif
#ifndef __HAVE_ARCH_STRSPN
extern __kernel_size_t strspn(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRCSPN
extern __kernel_size_t strcspn(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_MEMSET
extern void * memset(void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
extern void * memmove(void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
extern void * memscan(void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
/* Allocating duplicators / argv helpers (implemented out of line). */
extern char *kstrdup(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
			const void *from, size_t available);
/**
 * strstarts - does @str start with @prefix?
 * @str: string to examine
 * @prefix: prefix to look for.
 */
static inline bool strstarts(const char *str, const char *prefix)
{
	/* strncmp() stops at @str's NUL, so a shorter @str is never
	 * over-read; an empty @prefix trivially matches. */
	size_t plen = strlen(prefix);
	return !strncmp(str, prefix, plen);
}
#endif /* __KERNEL__ */
#endif /* _LINUX_STRING_H_ */
/drivers/include/linux/stringify.h |
---|
0,0 → 1,12 |
#ifndef __LINUX_STRINGIFY_H
#define __LINUX_STRINGIFY_H
/* Indirect stringification.  Doing two levels allows the parameter to be a
 * macro itself.  For example, compile with -DFOO=bar, __stringify(FOO)
 * converts to "bar".
 */
/* First level stringifies verbatim; second level forces macro expansion. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)
#endif	/* !__LINUX_STRINGIFY_H */
/drivers/include/linux/swab.h |
---|
0,0 → 1,299 |
#ifndef _LINUX_SWAB_H
#define _LINUX_SWAB_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/swab.h>
/*
 * casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
/* Full byte reversal of a 16-bit value (AB -> BA). */
#define ___constant_swab16(x) ((__u16)(				\
	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
	(((__u16)(x) & (__u16)0xff00U) >> 8)))
/* Full byte reversal of a 32-bit value (ABCD -> DCBA). */
#define ___constant_swab32(x) ((__u32)(				\
	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
	(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |		\
	(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |		\
	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
/* Full byte reversal of a 64-bit value. */
#define ___constant_swab64(x) ((__u64)(				\
	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
	(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |	\
	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |	\
	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
/* Swap the two 16-bit halfwords of a 32-bit value (ABCD -> CDAB). */
#define ___constant_swahw32(x) ((__u32)(			\
	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |		\
	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
/* Swap the bytes within each 16-bit halfword (ABCD -> BADC). */
#define ___constant_swahb32(x) ((__u32)(			\
	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |		\
	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
/*
 * Implement the following as inlines, but define the interface using
 * macros to allow constant folding when possible:
 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
 *
 * Each helper defers to the arch-optimised __arch_swab*() when the
 * architecture provides one, else falls back to the portable
 * ___constant_*() mask-and-shift form.
 */
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
	return __arch_swab16(val);
#else
	return ___constant_swab16(val);
#endif
}
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __arch_swab32
	return __arch_swab32(val);
#else
	return ___constant_swab32(val);
#endif
}
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __arch_swab64
	return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
	/* 32-bit arch: swap each half, then exchange the halves. */
	__u32 h = val >> 32;
	__u32 l = val & ((1ULL << 32) - 1);
	return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
	return ___constant_swab64(val);
#endif
}
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
	return __arch_swahw32(val);
#else
	return ___constant_swahw32(val);
#endif
}
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
	return __arch_swahb32(val);
#else
	return ___constant_swahb32(val);
#endif
}
/*
 * Public __swab*() interface: compile-time constants fold through the
 * ___constant_*() form (usable in static initialisers); runtime values
 * go through the inline __fswab*() helpers.
 */
/**
 * __swab16 - return a byteswapped 16-bit value
 * @x: value to byteswap
 */
#define __swab16(x)				\
	(__builtin_constant_p((__u16)(x)) ?	\
	___constant_swab16(x) :			\
	__fswab16(x))
/**
 * __swab32 - return a byteswapped 32-bit value
 * @x: value to byteswap
 */
#define __swab32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swab32(x) :			\
	__fswab32(x))
/**
 * __swab64 - return a byteswapped 64-bit value
 * @x: value to byteswap
 */
#define __swab64(x)				\
	(__builtin_constant_p((__u64)(x)) ?	\
	___constant_swab64(x) :			\
	__fswab64(x))
/**
 * __swahw32 - return a word-swapped 32-bit value
 * @x: value to wordswap
 *
 * __swahw32(0x12340000) is 0x00001234
 */
#define __swahw32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swahw32(x) :		\
	__fswahw32(x))
/**
 * __swahb32 - return a high and low byte-swapped 32-bit value
 * @x: value to byteswap
 *
 * __swahb32(0x12345678) is 0x34127856
 */
#define __swahb32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swahb32(x) :		\
	__fswahb32(x))
/*
 * Pointer (__swab*p) and in-place (__swab*s) variants.  Each defers to
 * an arch-provided routine when available (e.g. one that can do an
 * unaligned or combined load+swap), else builds on the value forms.
 */
/**
 * __swab16p - return a byteswapped 16-bit value from a pointer
 * @p: pointer to a naturally-aligned 16-bit value
 */
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
	return __arch_swab16p(p);
#else
	return __swab16(*p);
#endif
}
/**
 * __swab32p - return a byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 */
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
	return __arch_swab32p(p);
#else
	return __swab32(*p);
#endif
}
/**
 * __swab64p - return a byteswapped 64-bit value from a pointer
 * @p: pointer to a naturally-aligned 64-bit value
 */
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
	return __arch_swab64p(p);
#else
	return __swab64(*p);
#endif
}
/**
 * __swahw32p - return a wordswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping.
 */
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
	return __arch_swahw32p(p);
#else
	return __swahw32(*p);
#endif
}
/**
 * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high/low byteswapping.
 */
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
	return __arch_swahb32p(p);
#else
	return __swahb32(*p);
#endif
}
/**
 * __swab16s - byteswap a 16-bit value in-place
 * @p: pointer to a naturally-aligned 16-bit value
 */
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
	__arch_swab16s(p);
#else
	*p = __swab16p(p);
#endif
}
/**
 * __swab32s - byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 */
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
	__arch_swab32s(p);
#else
	*p = __swab32p(p);
#endif
}
/**
 * __swab64s - byteswap a 64-bit value in-place
 * @p: pointer to a naturally-aligned 64-bit value
 */
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
	__arch_swab64s(p);
#else
	*p = __swab64p(p);
#endif
}
/**
 * __swahw32s - wordswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping
 */
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
	__arch_swahw32s(p);
#else
	*p = __swahw32p(p);
#endif
}
/**
 * __swahb32s - high and low byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high and low byte swapping
 */
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
	__arch_swahb32s(p);
#else
	*p = __swahb32p(p);
#endif
}
#ifdef __KERNEL__
/* Kernel-internal short names (no leading underscores). */
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
# define swahw32 __swahw32
# define swahb32 __swahb32
# define swab16p __swab16p
# define swab32p __swab32p
# define swab64p __swab64p
# define swahw32p __swahw32p
# define swahb32p __swahb32p
# define swab16s __swab16s
# define swab32s __swab32s
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAB_H */
/drivers/include/linux/typecheck.h |
---|
0,0 → 1,24 |
#ifndef TYPECHECK_H_INCLUDED
#define TYPECHECK_H_INCLUDED
/*
 * Check at compile time that something is of a particular type.
 * Always evaluates to 1 so you may use it easily in comparisons.
 */
/* Comparing the two dummy pointers warns unless the types match exactly. */
#define typecheck(type,x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})
/*
 * Check at compile time that 'function' is a certain type, or is a pointer
 * to that type (needs to use typedef for the function type.)
 */
#define typecheck_fn(type,function) \
({	typeof(type) __tmp = function; \
	(void)__tmp; \
})
#endif		/* TYPECHECK_H_INCLUDED */
/drivers/include/linux/types.h |
---|
0,0 → 1,345 |
#ifndef _LINUX_TYPES_H |
#define _LINUX_TYPES_H |
#include <asm/types.h> |
#ifndef __ASSEMBLY__ |
#ifdef __KERNEL__ |
#define DECLARE_BITMAP(name,bits) \ |
unsigned long name[BITS_TO_LONGS(bits)] |
#endif |
#include <linux/posix_types.h> |
#ifdef __KERNEL__ |
typedef __u32 __kernel_dev_t; |
typedef __kernel_fd_set fd_set; |
typedef __kernel_dev_t dev_t; |
typedef __kernel_ino_t ino_t; |
typedef __kernel_mode_t mode_t; |
typedef __kernel_nlink_t nlink_t; |
typedef __kernel_off_t off_t; |
typedef __kernel_pid_t pid_t; |
typedef __kernel_daddr_t daddr_t; |
typedef __kernel_key_t key_t; |
typedef __kernel_suseconds_t suseconds_t; |
typedef __kernel_timer_t timer_t; |
typedef __kernel_clockid_t clockid_t; |
typedef __kernel_mqd_t mqd_t; |
typedef _Bool bool; |
typedef __kernel_uid32_t uid_t; |
typedef __kernel_gid32_t gid_t; |
typedef __kernel_uid16_t uid16_t; |
typedef __kernel_gid16_t gid16_t; |
typedef unsigned long uintptr_t; |
#ifdef CONFIG_UID16 |
/* This is defined by include/asm-{arch}/posix_types.h */ |
typedef __kernel_old_uid_t old_uid_t; |
typedef __kernel_old_gid_t old_gid_t; |
#endif /* CONFIG_UID16 */ |
#if defined(__GNUC__) |
typedef __kernel_loff_t loff_t; |
#endif |
/* |
* The following typedefs are also protected by individual ifdefs for |
* historical reasons: |
*/ |
#ifndef _SIZE_T |
#define _SIZE_T |
typedef __kernel_size_t size_t; |
#endif |
#ifndef _SSIZE_T |
#define _SSIZE_T |
typedef __kernel_ssize_t ssize_t; |
#endif |
#ifndef _PTRDIFF_T |
#define _PTRDIFF_T |
typedef __kernel_ptrdiff_t ptrdiff_t; |
#endif |
#ifndef _TIME_T |
#define _TIME_T |
typedef __kernel_time_t time_t; |
#endif |
#ifndef _CLOCK_T |
#define _CLOCK_T |
typedef __kernel_clock_t clock_t; |
#endif |
#ifndef _CADDR_T |
#define _CADDR_T |
typedef __kernel_caddr_t caddr_t; |
#endif |
/* bsd */ |
typedef unsigned char u_char; |
typedef unsigned short u_short; |
typedef unsigned int u_int; |
typedef unsigned long u_long; |
/* sysv */ |
typedef unsigned char unchar; |
typedef unsigned short ushort; |
typedef unsigned int uint; |
typedef unsigned long ulong; |
#ifndef __BIT_TYPES_DEFINED__ |
#define __BIT_TYPES_DEFINED__ |
typedef __u8 u_int8_t; |
typedef __s8 int8_t; |
typedef __u16 u_int16_t; |
typedef __s16 int16_t; |
typedef __u32 u_int32_t; |
typedef __s32 int32_t; |
#endif /* !(__BIT_TYPES_DEFINED__) */ |
typedef __u8 uint8_t; |
typedef __u16 uint16_t; |
typedef __u32 uint32_t; |
#if defined(__GNUC__) |
typedef __u64 uint64_t; |
typedef __u64 u_int64_t; |
typedef __s64 int64_t; |
#endif |
/* this is a special 64bit data type that is 8-byte aligned */ |
#define aligned_u64 __u64 __attribute__((aligned(8))) |
#define aligned_be64 __be64 __attribute__((aligned(8))) |
#define aligned_le64 __le64 __attribute__((aligned(8))) |
/**
 * The type used for indexing onto a disc or disc partition.
 *
 * Linux always considers sectors to be 512 bytes long independently
 * of the devices real block size.
 *
 * blkcnt_t is the type of the inode's block count.
 */
#ifdef CONFIG_LBDAF
/* CONFIG_LBDAF: large-block-device support -- 64-bit sector/block counts. */
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
/*
 * The type of an index into the pagecache. Use a #define so asm/types.h
 * can override it.
 */
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
#endif /* __KERNEL__ */
/*
 * Below are truly Linux-specific types that should never collide with
 * any application/library that wants linux/types.h.
 */
#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#else
#define __bitwise__
#endif
#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__
#else
#define __bitwise
#endif
/* Endian-annotated integers: plain ints unless sparse (__CHECKER__) runs. */
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __le32;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
/* Checksum types: 16-bit folded sum and 32-bit wide sum. */
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
#else
typedef u32 phys_addr_t;
#endif
typedef phys_addr_t resource_size_t;
/* Opaque atomic counter; volatile so the compiler re-reads it each access. */
typedef struct {
volatile int counter;
} atomic_t;
#ifdef CONFIG_64BIT
typedef struct {
volatile long counter;
} atomic64_t;
#endif
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* KolibriOS-native fixed-width aliases (outside the __ASSEMBLY__ guard). */
typedef unsigned char u8_t;
typedef unsigned short u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
/* assumes a 32-bit address space -- addr_t/count_t are 32 bits wide */
typedef unsigned int addr_t;
typedef unsigned int count_t;
/* WARN is stubbed to a no-op in this port -- warnings are silently dropped. */
# define WARN(condition, format...)
/* No <stdbool.h>: plain integer truth values. */
#define false 0
#define true 1
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define BITS_PER_LONG 32
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
/* DRM_INFO is routed to the local dbgprintf; DRM_ERROR still uses printk. */
#define DRM_INFO(fmt, arg...) dbgprintf("DRM: "fmt , ##arg)
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
/* Compile-time assertion: the array size goes negative when e is true. */
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
#define __must_be_array(a) \
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#ifndef HAVE_ARCH_BUG
/* BUG only logs here -- panic() is commented out, so execution continues. */
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
/* panic("BUG!"); */ \
} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
/* x86 MTRR memory-type encodings (values match the hardware register). */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
/* Freestanding build: no libc headers, so declare what the DDK provides. */
int dbgprintf(const char* format, ...);
#define GFP_KERNEL 0
//#include <stdio.h>
int snprintf(char *str, size_t size, const char *format, ...);
//#include <string.h>
void* memcpy(void *s1, const void *s2, size_t n);
void* memset(void *s, int c, size_t n);
size_t strlen(const char *s);
char *strcpy(char *s1, const char *s2);
char *strncpy (char *dst, const char *src, size_t len);
void *malloc(size_t size);
#define kfree free
/* Allocate size bytes of zero-initialized memory.
 * flags is accepted for Linux API compatibility but ignored here.
 * Returns NULL on allocation failure.
 * Fix: the original called memset() on the malloc() result without a
 * NULL check, crashing on out-of-memory instead of returning NULL. */
static inline void *kzalloc(size_t size, uint32_t flags)
{
    void *ret = malloc(size);
    if (ret != NULL)
        memset(ret, 0, size);
    return ret;
}
/* kmalloc is mapped onto kzalloc, so every kmalloc here also zero-fills. */
#define kmalloc(s,f) kzalloc((s), (f))
struct drm_file;
/* Full memory barrier: a locked add to (%esp) serializes memory on x86. */
#define DRM_MEMORYBARRIER() __asm__ __volatile__("lock; addl $0,0(%esp)")
#define mb() __asm__ __volatile__("lock; addl $0,0(%esp)")
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
/* i386 64-by-32 unsigned divide: divides the 64-bit lvalue n by base in
 * place and evaluates to the 32-bit remainder.  n is split into edx:eax
 * via the "A" constraint; the high word is reduced modulo base first so
 * the divl below cannot overflow. */
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
/* Function entry/exit tracing via the debug board. */
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__)
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__)
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
#endif /* _LINUX_TYPES_H */
/drivers/include/syscall.h |
---|
1,4 → 1,8 |
#ifndef __SYSCALL_H__ |
#define __SYSCALL_H__ |
#define OS_BASE 0x80000000 |
typedef struct |
17,7 → 21,7 |
#define ERR_PARAM -1 |
u32_t __stdcall drvEntry(int)__asm__("_drvEntry"); |
u32_t drvEntry(int, char *)__asm__("_drvEntry"); |
/////////////////////////////////////////////////////////////////////////////// |
38,11 → 42,17 |
#define PG_NOCACHE 0x018 |
void* STDCALL AllocKernelSpace(size_t size)__asm__("AllocKernelSpace"); |
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace"); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32_t flags)__asm__("MapIoMem"); |
void* STDCALL KernelAlloc(size_t size)__asm__("KernelAlloc"); |
void* STDCALL KernelFree(void *mem)__asm__("KernelFree"); |
void* STDCALL UserAlloc(size_t size)__asm__("UserAlloc"); |
int STDCALL UserFree(void *mem)__asm__("UserFree"); |
void* STDCALL GetDisplay()__asm__("GetDisplay"); |
addr_t STDCALL AllocPage()__asm__("AllocPage"); |
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages"); |
void* STDCALL CreateRingBuffer(size_t size, u32_t map)__asm__("CreateRingBuffer"); |
52,11 → 62,6 |
int STDCALL AttachIntHandler(int irq, void *handler, u32_t access) __asm__("AttachIntHandler"); |
//void *CreateObject(u32 pid, size_t size); |
//void *DestroyObject(void *obj); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32_t flags)__asm__("MapIoMem"); |
/////////////////////////////////////////////////////////////////////////////// |
void STDCALL SetMouseData(int btn, int x, int y, |
98,7 → 103,7 |
/////////////////////////////////////////////////////////////////////////////// |
extern inline int GetScreenSize() |
static inline int GetScreenSize() |
{ |
int retval; |
108,7 → 113,7 |
return retval; |
} |
extern inline int GetScreenBpp() |
static inline int GetScreenBpp() |
{ |
int retval; |
118,7 → 123,7 |
return retval; |
} |
extern inline int GetScreenPitch() |
static inline int GetScreenPitch() |
{ |
int retval; |
128,7 → 133,7 |
return retval; |
} |
extern inline u32_t GetPgAddr(void *mem) |
static inline u32_t GetPgAddr(void *mem) |
{ |
u32_t retval; |
139,7 → 144,7 |
return retval; |
}; |
extern inline void CommitPages(void *mem, u32_t page, u32_t size) |
static inline void CommitPages(void *mem, u32_t page, u32_t size) |
{ |
size = (size+4095) & ~4095; |
__asm__ __volatile__ ( |
149,7 → 154,7 |
__asm__ __volatile__ ("":::"eax","ebx","ecx"); |
}; |
extern inline void UnmapPages(void *mem, size_t size) |
static inline void UnmapPages(void *mem, size_t size) |
{ |
size = (size+4095) & ~4095; |
__asm__ __volatile__ ( |
159,11 → 164,11 |
__asm__ __volatile__ ("":::"eax","ecx"); |
}; |
extern inline void usleep(u32_t delay) |
static inline void usleep(u32_t delay) |
{ |
if( !delay ) |
delay++; |
delay*=1000; |
delay*= 500; |
while(delay--) |
__asm__ __volatile__ ( |
172,8 → 177,36 |
:::"eax","ebx","ecx","edx"); |
}; |
extern inline u32_t __PciApi(int cmd) |
/* Busy-wait delay: executes delay*500 serializing cpuid instructions
 * (eax zeroed before each cpuid).  NOTE(review): the constant 500 is a
 * fixed calibration, not derived from CPU speed -- actual wall time
 * varies by machine; confirm against the kernel's timing expectations. */
static inline void udelay(u32_t delay)
{
if(!delay) delay++;
delay*= 500;
while(delay--)
{
__asm__ __volatile__(
"xorl %%eax, %%eax \n\t"
"cpuid"
:::"eax","ebx","ecx","edx" );
}
}
/* Millisecond-scale delay via the imported kernel Delay service.
 * time is divided by 10 (minimum one tick) before the call -- presumably
 * Delay counts 10 ms ticks; verify against the kernel.  The argument is
 * passed in ebx; the trailing empty asm declares ebx clobbered. */
static inline void mdelay(u32_t time)
{
time /= 10;
if(!time) time = 1;
__asm__ __volatile__ (
"call *__imp__Delay"
::"b" (time));
__asm__ __volatile__ (
"":::"ebx");
};
static inline u32_t __PciApi(int cmd) |
{ |
u32_t retval; |
__asm__ __volatile__ ( |
184,7 → 217,7 |
return retval; |
}; |
extern inline void* __CreateObject(u32_t pid, size_t size) |
static inline void* __CreateObject(u32_t pid, size_t size) |
{ |
void *retval; |
196,13 → 229,15 |
return retval; |
} |
extern inline void *__DestroyObject(void *obj) |
static inline void __DestroyObject(void *obj) |
{ |
__asm__ __volatile__ ( |
"call *__imp__DestroyObject" |
"call *__imp__DestroyObject \n\t" |
: |
:"a" (obj) |
:"ebx","edx","esi","edi", "memory"); |
:"a" (obj)); |
__asm__ __volatile__ ( |
"" |
:::"eax","ebx","ecx","edx","esi","edi","cc","memory"); |
} |
224,8 → 259,24 |
}; |
*/ |
extern inline u32_t safe_cli(void) |
/* Look up a kernel service by name and return its handle.  name is
 * passed in eax (saved with pushl before the call).
 * NOTE(review): the output constraint "=eax" is dubious GCC syntax --
 * it reads as the separate constraint letters 'e','a','x', not the eax
 * register; the conventional spelling is "=a".  Left untouched pending
 * a check against the toolchain this was built with. */
static inline u32_t GetService(const char *name)
{
u32_t handle;
__asm__ __volatile__
(
"pushl %%eax \n\t"
"call *__imp__GetService"
:"=eax" (handle)
:"a" (name)
:"ebx","ecx","edx","esi", "edi"
);
return handle;
};
static inline u32_t safe_cli(void) |
{ |
u32_t ifl; |
__asm__ __volatile__ ( |
"pushf\n\t" |
235,7 → 286,7 |
return ifl; |
} |
extern inline void safe_sti(u32_t ifl) |
static inline void safe_sti(u32_t ifl) |
{ |
__asm__ __volatile__ ( |
"pushl %0\n\t" |
244,7 → 295,7 |
); |
} |
extern inline void __clear (void * dst, unsigned len) |
static inline void __clear (void * dst, unsigned len) |
{ |
u32_t tmp; |
__asm__ __volatile__ ( |
256,25 → 307,25 |
__asm__ __volatile__ ("":::"ecx","edi"); |
}; |
extern inline void out8(const u16_t port, const u8_t val) |
/* Write one byte to an x86 I/O port (port in dx or as an immediate,
 * value in al). */
static inline void out8(const u16_t port, const u8_t val)
{
__asm__ __volatile__
("outb %1, %0\n" : : "dN"(port), "a"(val));
}
extern inline void out16(const u16_t port, const u16_t val) |
/* Write one 16-bit word to an x86 I/O port (port in dx/immediate,
 * value in ax). */
static inline void out16(const u16_t port, const u16_t val)
{
__asm__ __volatile__
("outw %1, %0\n" : : "dN"(port), "a"(val));
}
extern inline void out32(const u16_t port, const u32_t val) |
/* Write one 32-bit dword to an x86 I/O port (port in dx/immediate,
 * value in eax). */
static inline void out32(const u16_t port, const u32_t val)
{
__asm__ __volatile__
("outl %1, %0\n" : : "dN"(port), "a"(val));
}
extern inline u8_t in8(const u16_t port) |
static inline u8_t in8(const u16_t port) |
{ |
u8_t tmp; |
__asm__ __volatile__ |
282,7 → 333,7 |
return tmp; |
}; |
extern inline u16_t in16(const u16_t port) |
static inline u16_t in16(const u16_t port) |
{ |
u16_t tmp; |
__asm__ __volatile__ |
290,7 → 341,7 |
return tmp; |
}; |
extern inline u32_t in32(const u16_t port) |
static inline u32_t in32(const u16_t port) |
{ |
u32_t tmp; |
__asm__ __volatile__ |
298,7 → 349,7 |
return tmp; |
}; |
extern inline void delay(int time) |
static inline void delay(int time) |
{ |
__asm__ __volatile__ ( |
"call *__imp__Delay" |
308,9 → 359,49 |
} |
extern inline void change_task() |
/* Yield the CPU: ask the kernel scheduler to switch to another task. */
static inline void change_task()
{
__asm__ __volatile__ (
"call *__imp__ChangeTask");
}
/* Switch the display to width x height with the given scan-line pitch.
 * The kernel service takes width-1 in eax, height-1 in edx and pitch in
 * ecx; the trailing empty asm marks those registers as clobbered by the
 * call.  Fix: the declaration had no return type (implicit int -- invalid
 * since C99, and nothing was ever returned); it is void. */
static inline void sysSetScreen(int width, int height, int pitch)
{
    __asm__ __volatile__
    (
    "call *__imp__SetScreen"
    :
    :"a" (width-1),"d"(height-1), "c"(pitch)
    );
    __asm__ __volatile__
    ("" :::"eax","ecx","edx");
}
int drm_order(unsigned long size); |
/* Map a physical MMIO range into kernel virtual space via MapIoMem.
 * The literal flags value 3 is passed through -- its meaning is defined
 * by MapIoMem (see syscall.h); presumably present+writable, confirm. */
static inline void __iomem *ioremap(uint32_t offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, 3);
}
/* Undo ioremap(): release the kernel-space mapping for addr. */
static inline void iounmap(void *addr)
{
FreeKernelSpace(addr);
}
static inline void * |
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, |
addr_t *dma_handle) |
{ |
*dma_handle = AllocPages(size >> 12); |
return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE); |
} |
/* Print a NUL-terminated string to the kernel debug board.
 * The pointer is passed in esi ("S" constraint) to the imported
 * SysMsgBoardStr service. */
static inline void __SysMsgBoardStr(char *text)
{
__asm__ __volatile__(
"call *__imp__SysMsgBoardStr"
::"S" (text));
};
#endif |
/drivers/video/drm/includes/linux/pci.h |
---|
File deleted |
/drivers/video/drm/includes/linux/firmware.h |
---|
File deleted |
/drivers/video/drm/includes/linux/stringify.h |
---|
File deleted |
/drivers/video/drm/includes/linux/typecheck.h |
---|
File deleted |
/drivers/video/drm/includes/linux/spinlock_api_up.h |
---|
File deleted |
/drivers/video/drm/includes/linux/kernel.h |
---|
File deleted |
/drivers/video/drm/includes/linux/list_sort.h |
---|
File deleted |
/drivers/video/drm/includes/linux/spinlock.h |
---|
File deleted |
/drivers/video/drm/includes/linux/errno.h |
---|
File deleted |
/drivers/video/drm/includes/linux/posix_types.h |
---|
File deleted |
/drivers/video/drm/includes/linux/seq_file.h |
---|
File deleted |
/drivers/video/drm/includes/linux/fb.h |
---|
File deleted |
/drivers/video/drm/includes/linux/bitops.h |
---|
File deleted |
/drivers/video/drm/includes/linux/types.h |
---|
File deleted |
/drivers/video/drm/includes/linux/i2c-id.h |
---|
File deleted |
/drivers/video/drm/includes/linux/compiler.h |
---|
File deleted |
/drivers/video/drm/includes/linux/spinlock_types_up.h |
---|
File deleted |
/drivers/video/drm/includes/linux/list.h |
---|
File deleted |
/drivers/video/drm/includes/linux/bitmap.h |
---|
File deleted |
/drivers/video/drm/includes/linux/i2c.h |
---|
File deleted |
/drivers/video/drm/includes/linux/idr.h |
---|
File deleted |
/drivers/video/drm/includes/linux/lockdep.h |
---|
File deleted |
/drivers/video/drm/includes/linux/module.h |
---|
File deleted |
/drivers/video/drm/includes/linux/spinlock_up.h |
---|
File deleted |
/drivers/video/drm/includes/linux/sched.h |
---|
File deleted |
/drivers/video/drm/includes/linux/kref.h |
---|
File deleted |
/drivers/video/drm/includes/linux/compiler-gcc4.h |
---|
File deleted |
/drivers/video/drm/includes/linux/swab.h |
---|
File deleted |
/drivers/video/drm/includes/linux/compiler-gcc.h |
---|
File deleted |
/drivers/video/drm/includes/linux/string.h |
---|
File deleted |
/drivers/video/drm/includes/linux/spinlock_types.h |
---|
File deleted |
/drivers/video/drm/includes/linux/i2c-algo-bit.h |
---|
File deleted |
/drivers/video/drm/includes/linux/stddef.h |
---|
File deleted |
/drivers/video/drm/includes/linux/byteorder/little_endian.h |
---|
File deleted |
/drivers/video/drm/includes/linux/byteorder/generic.h |
---|
File deleted |
/drivers/video/drm/includes/asm/required-features.h |
---|
File deleted |
/drivers/video/drm/includes/asm/swab.h |
---|
File deleted |
/drivers/video/drm/includes/asm/cmpxchg_32.h |
---|
File deleted |
/drivers/video/drm/includes/asm/byteorder.h |
---|
File deleted |
/drivers/video/drm/includes/asm/bitops.h |
---|
File deleted |
/drivers/video/drm/includes/asm/cpufeature.h |
---|
File deleted |
/drivers/video/drm/includes/asm/types.h |
---|
File deleted |
/drivers/video/drm/includes/asm/spinlock_types.h |
---|
File deleted |
/drivers/video/drm/includes/asm/string.h |
---|
File deleted |
/drivers/video/drm/includes/asm/string_32.h |
---|
File deleted |
/drivers/video/drm/includes/asm/atomic.h |
---|
File deleted |
/drivers/video/drm/includes/asm/alternative.h |
---|
File deleted |
/drivers/video/drm/includes/asm/atomic_32.h |
---|
File deleted |
/drivers/video/drm/includes/asm/asm.h |
---|
File deleted |
/drivers/video/drm/includes/asm/posix_types.h |
---|
File deleted |
/drivers/video/drm/includes/asm/bitsperlong.h |
---|
File deleted |
/drivers/video/drm/includes/asm/cmpxchg.h |
---|
File deleted |
/drivers/video/drm/includes/asm/posix_types_32.h |
---|
File deleted |
/drivers/video/drm/includes/syscall.h |
---|
File deleted |
/drivers/video/drm/includes/errno-base.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/types.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitsperlong.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/int-ll64.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/atomic-long.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitops/hweight.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitops/le.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitops/ext2-non-atomic.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitops/minix.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitops/sched.h |
---|
File deleted |
/drivers/video/drm/includes/asm-generic/bitops/fls64.h |
---|
File deleted |
/drivers/video/drm/radeon/makefile |
---|
4,15 → 4,17 |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 |
DRV_TOPDIR = $(CURDIR)/../../.. |
DRM_TOPDIR = $(CURDIR)/.. |
DRM_INCLUDES = $(DRM_TOPDIR)/includes |
INCLUDES = -I$(DRM_INCLUDES) -I$(DRM_INCLUDES)/drm \ |
-I$(DRM_INCLUDES)/linux -I$(DRM_INCLUDES)/asm |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \ |
-I$(DRV_INCLUDES)/linux |
CFLAGS = -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf |
LIBPATH:= . |
LIBPATH:= $(DRV_TOPDIR)/ddk |
LIBS:= -ldrv -lcore |
22,15 → 24,15 |
NAME:= atikms |
HFILES:= $(DRM_INCLUDES)/linux/types.h \ |
$(DRM_INCLUDES)/linux/list.h \ |
$(DRM_INCLUDES)/linux/pci.h \ |
$(DRM_INCLUDES)/drm/drm.h \ |
$(DRM_INCLUDES)/drm/drmP.h \ |
$(DRM_INCLUDES)/drm/drm_edid.h \ |
$(DRM_INCLUDES)/drm/drm_crtc.h \ |
$(DRM_INCLUDES)/drm/drm_mode.h \ |
$(DRM_INCLUDES)/drm/drm_mm.h \ |
HFILES:= $(DRV_INCLUDES)/linux/types.h \ |
$(DRV_INCLUDES)/linux/list.h \ |
$(DRV_INCLUDES)/linux/pci.h \ |
$(DRV_INCLUDES)/drm/drm.h \ |
$(DRV_INCLUDES)/drm/drmP.h \ |
$(DRV_INCLUDES)/drm/drm_edid.h \ |
$(DRV_INCLUDES)/drm/drm_crtc.h \ |
$(DRV_INCLUDES)/drm/drm_mode.h \ |
$(DRV_INCLUDES)/drm/drm_mm.h \ |
atom.h \ |
radeon.h \ |
radeon_asic.h |
100,7 → 102,7 |
all: $(NAME).dll |
$(NAME).dll: $(NAME_OBJS) $(SRC_DEP) $(HFILES) atikms.lds Makefile |
ld -L$(LIBPATH) $(LDFLAGS) -T atikms.lds -o $@ $(NAME_OBJS) vsprintf.obj icompute.obj $(LIBS) |
ld -L$(LIBPATH) $(LDFLAGS) -T atikms.lds -o $@ $(NAME_OBJS) $(LIBS) |
%.o : %.c $(HFILES) Makefile |