Subversion Repositories Kolibri OS

Compare Revisions

Rev 1613 → Rev 1616

/drivers/include/ddk.h
0,0 → 1,56
 
#ifndef __DDK_H__
#define __DDK_H__
 
#include <kernel.h>
 
#define OS_BASE 0x80000000
 
#define PG_SW 0x003
#define PG_NOCACHE 0x018
 
#define MANUAL_DESTROY 0x80000000
 
typedef struct
{
    u32_t  code;
    u32_t  data[5];
}kevent_t;
 
typedef union
{
    struct
    {
        u32_t  handle;
        u32_t  euid;
    };
    u64_t raw;
}evhandle_t;
 
typedef struct
{
    u32_t   handle;
    u32_t   io_code;
    void    *input;
    int     inp_size;
    void    *output;
    int     out_size;
}ioctl_t;
 
typedef int (__stdcall *srv_proc_t)(ioctl_t *);
 
#define ERR_OK 0
#define ERR_PARAM -1
 
 
struct ddk_params;
 
int ddk_init(struct ddk_params *params);
 
u32_t drvEntry(int, char *)__asm__("_drvEntry");
 
 
 
 
 
#endif /* __DDK_H__ */
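
The srv_proc_t callback declared above is the entry point a driver exposes for ioctl-style requests. Below is a minimal sketch of such a handler; the request code, handler name and version value are illustrative assumptions, and the step that registers the handler with the kernel is omitted.

#include <ddk.h>

#define SRV_GETVERSION  0                 /* hypothetical request code, for illustration only */

static int __stdcall my_srv_handler(ioctl_t *io)
{
    u32_t *out;

    if (io == NULL)
        return ERR_PARAM;

    switch (io->io_code)
    {
        case SRV_GETVERSION:
            out = (u32_t*)io->output;
            if (out == NULL || io->out_size < 4)
                return ERR_PARAM;
            *out = 0x00000100;            /* made-up version value */
            return ERR_OK;

        default:
            return ERR_PARAM;
    }
}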
/drivers/include/linux/dmapool.h
0,0 → 1,26
/*
* include/linux/dmapool.h
*
* Allocation pools for DMAable (coherent) memory.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
 
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
 
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t allocation);
 
void dma_pool_destroy(struct dma_pool *pool);
 
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle);
 
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
 
#endif
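
These four declarations follow the stock Linux dma_pool interface: a pool of small, equally sized, DMA-coherent blocks carved out of larger allocations. A usage sketch, assuming a valid struct device pointer and passing 0 for the gfp_t flags purely for illustration:

static struct dma_pool *desc_pool;

int example_init(struct device *dev)
{
    dma_addr_t dma;
    void *desc;

    /* 64-byte blocks, 16-byte aligned, default allocation size */
    desc_pool = dma_pool_create("desc", dev, 64, 16, 0);
    if (desc_pool == NULL)
        return -1;

    desc = dma_pool_alloc(desc_pool, 0, &dma);   /* dma receives the bus address */
    if (desc == NULL) {
        dma_pool_destroy(desc_pool);
        return -1;
    }

    /* ... hand 'dma' to the device, access the block through 'desc' ... */

    dma_pool_free(desc_pool, desc, dma);
    dma_pool_destroy(desc_pool);
    return 0;
}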
 
/drivers/include/linux/mutex.h
0,0 → 1,86
/*
* Mutexes: blocking mutual exclusion locks
*
* started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* This file contains the main data structure and API definitions.
*/
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H
 
#include <kernel.h>
#include <linux/list.h>
#include <asm/atomic.h>
 
/*
* Simple, straightforward mutexes with strict semantics:
*
* - only one task can hold the mutex at a time
* - only the owner can unlock the mutex
* - multiple unlocks are not permitted
* - recursive locking is not permitted
* - a mutex object must be initialized via the API
* - a mutex object must not be initialized via memset or copying
* - task may not exit with mutex held
* - memory areas where held locks reside must not be freed
* - held mutexes must not be reinitialized
* - mutexes may not be used in hardware or software interrupt
* contexts such as tasklets and timers
*
* These semantics are fully enforced when DEBUG_MUTEXES is
* enabled. Furthermore, besides enforcing the above rules, the mutex
* debugging code also implements a number of additional features
* that make lock debugging easier and faster:
*
* - uses symbolic names of mutexes, whenever they are printed in debug output
* - point-of-acquire tracking, symbolic lookup of function names
* - list of all locks held in the system, printout of them
* - owner tracking
* - detects self-recursing locks and prints out all relevant info
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
struct mutex {
    /* 1: unlocked, 0: locked, negative: locked, possible waiters */
    atomic_t            count;
    struct list_head    wait_list;
};
 
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
*/
struct mutex_waiter {
    struct list_head    list;
    int                 *task;
};
 
 
#define __MUTEX_INITIALIZER(lockname) \
        { .count = ATOMIC_INIT(1) \
        , .wait_list = LIST_HEAD_INIT(lockname.wait_list) }

#define DEFINE_MUTEX(mutexname) \
        struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
 
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
mutex_init(struct mutex*)__asm__("MutexInit");
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
mutex_lock(struct mutex*)__asm__("MutexLock");
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
mutex_unlock(struct mutex*)__asm__("MutexUnlock");
 
/**
* mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
static inline int mutex_is_locked(struct mutex *lock)
{
    return atomic_read(&lock->count) != 1;
}
 
#endif
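
Given the rules spelled out in the header comment (single owner, no recursion, initialization only through the API), the typical pattern with this interface is a statically defined mutex guarding shared state, as in the sketch below; the lock and the fields it protects are made up for illustration.

#include <linux/mutex.h>

static DEFINE_MUTEX(cursor_lock);       /* count = 1 (unlocked), empty wait list */
static int cursor_x, cursor_y;          /* shared state protected by cursor_lock */

static void move_cursor(int x, int y)
{
    mutex_lock(&cursor_lock);           /* blocks until the mutex becomes free */
    cursor_x = x;
    cursor_y = y;
    mutex_unlock(&cursor_lock);         /* only the owner may unlock */
}

mutex_is_locked() simply tests count != 1 and is intended for assertions and debug output, not as a substitute for taking the lock.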
/drivers/include/syscall.h
2,33 → 2,6
#ifndef __SYSCALL_H__
#define __SYSCALL_H__
 
 
#define OS_BASE 0x80000000
 
typedef struct
{
    u32_t  code;
    u32_t  data[5];
}kevent_t;

typedef struct
{
    u32_t   handle;
    u32_t   io_code;
    void    *input;
    int     inp_size;
    void    *output;
    int     out_size;
}ioctl_t;
 
typedef int (__stdcall *srv_proc_t)(ioctl_t *);
 
#define ERR_OK 0
#define ERR_PARAM -1
 
 
u32_t drvEntry(int, char *)__asm__("_drvEntry");
 
///////////////////////////////////////////////////////////////////////////////
 
#define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport))
40,14 → 13,11
 
#define SysMsgBoardStr __SysMsgBoardStr
#define PciApi __PciApi
//#define RegService __RegService
#define CreateObject __CreateObject
#define DestroyObject __DestroyObject
 
///////////////////////////////////////////////////////////////////////////////
 
#define PG_SW 0x003
#define PG_NOCACHE 0x018
 
void* STDCALL AllocKernelSpace(size_t size)__asm__("AllocKernelSpace");
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace");
59,6 → 29,7
 
void* STDCALL GetDisplay(void)__asm__("GetDisplay");
 
u32_t IMPORT GetTimerTicks(void)__asm__("GetTimerTicks");
 
addr_t STDCALL AllocPage(void)__asm__("AllocPage");
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages");
78,8 → 49,6
void STDCALL SetMouseData(int btn, int x, int y,
                          int z, int h)__asm__("SetMouseData");
 
static u32_t PciApi(int cmd);
 
u8_t STDCALL PciRead8 (u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead8");
u16_t STDCALL PciRead16(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead16");
u32_t STDCALL PciRead32(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead32");
114,23 → 83,52
 
///////////////////////////////////////////////////////////////////////////////
 
 
static inline u32_t CreateEvent(kevent_t *ev, u32_t flags, u32_t *uid)
static inline evhandle_t CreateEvent(kevent_t *ev, u32_t flags)
{
    u32_t handle;
    u32_t euid;
    evhandle_t evh;

    __asm__ __volatile__ (
    "call *__imp__CreateEvent"
    :"=a"(handle),"=d"(euid)
    :"S" (ev), "c"(flags));
    :"=A"(evh.raw)
    :"S" (ev), "c"(flags)
    :"memory");
    __asm__ __volatile__ ("":::"ebx","ecx", "esi", "edi");

    if(uid) *uid = euid;
    return evh;
};
 
static inline void RaiseEvent(evhandle_t evh, u32_t flags, kevent_t *ev)
{
    __asm__ __volatile__ (
    "call *__imp__RaiseEvent"
    ::"a"(evh.handle),"b"(evh.euid),"d"(flags),"S" (ev)
    :"memory");
    __asm__ __volatile__ ("":::"ebx","ecx", "esi", "edi");

};
 
static inline void WaitEvent(u32_t handle, u32_t euid)
{
    __asm__ __volatile__ (
    "call *__imp__WaitEvent"
    ::"a"(handle),"b"(euid));
    __asm__ __volatile__ ("":::"ecx","edx", "esi");
};
 
static inline u32_t GetEvent(kevent_t *ev)
{
    u32_t handle;

    __asm__ __volatile__ (
    "call *__imp__GetEvent"
    :"=a"(handle)
    :"D"(ev)
    :"memory");
    __asm__ __volatile__ ("":::"ebx","ecx","edx", "esi","edi");
    return handle;
};
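
With the evhandle_t-based wrappers above, an event is created once, waited on by its owner via the saved handle/euid pair, and raised from another context such as an interrupt handler. A rough sketch, with 0 passed for the flags arguments purely as a placeholder (the real flag values live elsewhere in the kernel):

static evhandle_t irq_evh;
static kevent_t   irq_ev;

static void wait_for_irq(void)
{
    irq_ev.code = 0;
    irq_evh = CreateEvent(&irq_ev, 0);            /* flags value is an assumption */
    WaitEvent(irq_evh.handle, irq_evh.euid);      /* sleeps until the event is raised */
}

static void on_irq(void)
{
    RaiseEvent(irq_evh, 0, &irq_ev);              /* wakes the waiter above */
}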
 
 
static inline int GetScreenSize(void)
{
    int retval;
238,10 → 236,11
    u32_t retval;

    __asm__ __volatile__ (
    "call *__imp__PciApi"
    "call *__imp__PciApi \n\t"
    "movzxb %%al, %%eax"
    :"=a" (retval)
    :"a" (cmd)
    :"memory");
    :"ebx","ecx","edx");
    return retval;
};
 
294,13 → 293,10
return ifl;
}
 
static inline void safe_sti(u32_t ifl)
static inline void safe_sti(u32_t efl)
{
    __asm__ __volatile__ (
    "pushl %0\n\t"
    "popf\n"
    : : "r" (ifl)
    );
    if (efl & (1<<9))
        __asm__ __volatile__ ("sti");
}
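
The rewritten safe_sti executes sti only when bit 9 (IF) of the saved flags is set, so interrupts are not force-enabled if they were already disabled before the matching safe_cli. Assuming the usual safe_cli counterpart that returns the saved EFLAGS, the pairing looks like:

static void touch_shared_state(void)
{
    u32_t efl = safe_cli();        /* save EFLAGS, then disable interrupts */

    /* ... short critical section ... */

    safe_sti(efl);                 /* re-enable interrupts only if IF was set before */
}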
 
static inline u32_t get_eflags(void)
317,7 → 313,6
{
    u32_t tmp;
    __asm__ __volatile__ (
// "xorl %%eax, %%eax \n\t"
    "cld \n\t"
    "rep stosb \n"
    :"=c"(tmp),"=D"(tmp)
411,6 → 406,9
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                     addr_t *dma_handle)
{
    /* round the request up to a 32 KiB multiple */
    size = (size + 0x7FFF) & ~0x7FFF;

    *dma_handle = AllocPages(size >> 12);                    /* size in 4 KiB pages */
    return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE);
}
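
Because of the new rounding, every request is padded to a 32 KiB (0x8000-byte) multiple before being converted to a 4 KiB page count for AllocPages, so even a small buffer occupies at least eight uncached pages. A hypothetical call site:

static int setup_ring(struct pci_dev *pdev)
{
    addr_t dma;
    void  *ring;

    /* asks for 4 KiB; after rounding this reserves 32 KiB of uncached memory */
    ring = pci_alloc_consistent(pdev, 4096, &dma);
    if (ring == NULL)
        return -1;

    /* ... give 'dma' to the device, access the ring through 'ring' ... */
    return 0;
}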