/* ==== kernel/branches/kolibri_pe/include/atomic.h (new file, 131 lines) ==== */
/* |
* Copyright (c) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#ifndef KERN_ATOMIC_H_ |
#define KERN_ATOMIC_H_ |
/** Atomic counter: a single signed machine word updated with the
 *  lock-prefixed instructions below (atomic_inc/atomic_dec).
 *  `volatile` forces every access to hit memory instead of a cached
 *  register value. */
typedef struct atomic {
	volatile long count;
} atomic_t;
/** Atomically increment val->count by one.
 *  NOTE(review): the `inc` mnemonic carries no operand-size suffix and the
 *  operand is memory-only ("+m"); whether this assembles depends on the asm
 *  dialect the tree is built with (atomic_lock_arch below uses Intel-style
 *  `[%0]` brackets, suggesting -masm=intel) — confirm against the build
 *  flags; plain AT&T mode would require `incl`. */
static inline void atomic_inc(atomic_t *val) {
#ifdef USE_SMP
	/* `lock` makes the read-modify-write atomic across CPUs. */
	asm volatile ("lock inc %0\n" : "+m" (val->count));
#else
	/* UP build: a single memory inc cannot be torn by an interrupt. */
	asm volatile ("inc %0\n" : "+m" (val->count));
#endif /* USE_SMP */
}
/** Atomically decrement val->count by one.
 *  NOTE(review): same size-suffix/dialect caveat as atomic_inc() — confirm
 *  the assembler syntax this tree is compiled with. */
static inline void atomic_dec(atomic_t *val) {
#ifdef USE_SMP
	/* `lock` makes the read-modify-write atomic across CPUs. */
	asm volatile ("lock dec %0\n" : "+m" (val->count));
#else
	/* UP build: a single memory dec cannot be torn by an interrupt. */
	asm volatile ("dec %0\n" : "+m" (val->count));
#endif /* USE_SMP */
}
/* |
static inline long atomic_postinc(atomic_t *val) |
{ |
long r = 1; |
asm volatile ( |
"lock xadd %1, %0\n" |
: "+m" (val->count), "+r" (r) |
); |
return r; |
} |
static inline long atomic_postdec(atomic_t *val) |
{ |
long r = -1; |
asm volatile ( |
"lock xadd %1, %0\n" |
: "+m" (val->count), "+r"(r) |
); |
return r; |
} |
#define atomic_preinc(val) (atomic_postinc(val) + 1) |
#define atomic_predec(val) (atomic_postdec(val) - 1) |
static inline u32_t test_and_set(atomic_t *val) { |
uint32_t v; |
asm volatile ( |
"movl $1, %0\n" |
"xchgl %0, %1\n" |
: "=r" (v),"+m" (val->count) |
); |
return v; |
} |
*/ |
/* ia32 specific fast spinlock */
/** Spin until val->count can be raised 0 -> 1, i.e. the lock is acquired.
 *  Strategy: busy-wait with plain reads (plus `pause` to be friendly to
 *  hyper-threaded CPUs) until the word looks free, then attempt the claim
 *  with `xchg` — implicitly locked on x86; if another CPU won the race the
 *  exchanged-out value is non-zero and we go back to spinning.
 *  NOTE(review): the operands use Intel-style `[%0]` addressing on an "m"
 *  constraint — this only assembles with the Intel asm dialect; confirm the
 *  build flags before touching this routine. */
static inline void atomic_lock_arch(atomic_t *val)
{
	u32_t tmp;

	// preemption_disable();
	asm volatile (
		"0:\n"
		"pause\n\t" /* Pentium 4's HT love this instruction */
		"mov %1, [%0]\n\t"
		"test %1, %1\n\t"
		"jnz 0b\n\t" /* lightweight looping on locked spinlock */
		"inc %1\n\t" /* now use the atomic operation */
		"xchg [%0], %1\n\t"
		"test %1, %1\n\t"
		"jnz 0b\n\t"
		: "+m" (val->count), "=r"(tmp)
	);
	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	// CS_ENTER_BARRIER();
}
/** Plain (non-locked) store of @a i into the counter. Safe only when no
 *  other CPU is concurrently doing read-modify-write on the same word. */
static inline void atomic_set(atomic_t *val, long i)
{
	val->count = i;
}
/** Plain (non-locked) read of the current counter value; the `volatile`
 *  qualifier on the field guarantees a fresh load from memory. */
static inline long atomic_get(atomic_t *val)
{
	return val->count;
}
#endif /* KERN_ATOMIC_H_ */ |
/* ==== kernel/branches/kolibri_pe/include/core.h (new file, 48 lines) ==== */
/** Virtual address the kernel is mapped at (3.5 GiB); used by PA2KA/KA2PA. */
#define OS_BASE 0xE0000000

/* Kernel console formatted output (implemented elsewhere in the tree). */
void printf (const char *format, ...);

/** Return address of the current function's caller, for diagnostics. */
#define CALLER ((addr_t) __builtin_return_address(0))

/* Print a final message and halt; never returns. */
extern void panic_printf(char *fmt, ...) __attribute__((noreturn));
#ifdef CONFIG_DEBUG

/* Halt with function/file/line context.  The trailing ';' has been removed
 * from the expansion: `panic(...);` otherwise produced an empty extra
 * statement, which breaks brace-less `if (x) panic(...); else ...` chains. */
# define panic(format, ...) \
	panic_printf("Kernel panic in %s() at %s:%u: " format, __func__, \
		__FILE__, __LINE__, ##__VA_ARGS__)

/* Runtime assertion, active only in debug builds.  Wrapped in
 * do { } while (0) so it behaves as exactly one statement and avoids the
 * dangling-else hazard of a bare `if`. */
# define ASSERT(expr) \
	do { \
		if (!(expr)) { \
			panic("assertion failed (%s), caller=%p\n", #expr, CALLER); \
		} \
	} while (0)

#else

/* Release build: panic without source-location decoration. */
# define panic(format, ...) \
	panic_printf("Kernel panic: " format, ##__VA_ARGS__)

/* Assertions compile away entirely; `ASSERT(x);` leaves a harmless ';'. */
# define ASSERT(expr)

#endif
static inline eflags_t safe_cli(void) |
{ |
eflags_t tmp; |
asm volatile ( |
"pushf\n\t" |
"pop %0\n\t" |
"cli\n" |
: "=r" (tmp) |
); |
return tmp; |
} |
/** Restore an EFLAGS image previously saved by safe_cli(); interrupts are
 *  re-enabled iff they were enabled at save time.
 *
 *  @param efl flags image returned by the matching safe_cli().
 *
 *  The "memory" clobber keeps critical-section memory accesses from being
 *  sunk below the `popf` by the compiler.
 */
static inline void safe_sti(eflags_t efl)
{
	asm volatile (
		"push %0\n\t"
		"popf\n"
		:
		: "r" (efl)
		: "memory"
	);
}
/* ==== kernel/branches/kolibri_pe/include/link.h (new file, 50 lines) ==== */
/** Node of an intrusive circular doubly-linked list; embedded inside client
 *  structures and mapped back to the owner via list_get_instance().
 *  A detached node has both pointers NULL (see link_initialize()). */
typedef struct link
{
	struct link *prev;	/**< previous node, or NULL when detached */
	struct link *next;	/**< next node, or NULL when detached */
}link_t;
/** Recover a pointer to the structure that embeds @a link as @a member.
 *  Hand-rolled offsetof(): the member's byte offset is computed by taking
 *  the member's address inside a NULL-based object, then subtracted from
 *  the link pointer.  Relies on the de-facto behavior every supported
 *  compiler provides for this classic container-of idiom. */
#define list_get_instance(link, type, member) \
	((type *)(((u8_t *)(link)) - ((u8_t *)&(((type *)NULL)->member))))
/** Mark @a link as detached by clearing both neighbour pointers. */
static inline void link_initialize(link_t *link)
{
	link->next = NULL;
	link->prev = NULL;
}
/** Turn @a head into an empty circular list: both pointers loop to itself. */
static inline void list_initialize(link_t *head)
{
	head->next = head;
	head->prev = head;
}
/** Insert @a link at the tail of the list headed by @a head
 *  (i.e. immediately before the head sentinel). */
static inline void list_append(link_t *link, link_t *head)
{
	link_t *tail = head->prev;

	link->prev = tail;
	link->next = head;
	tail->next = link;
	head->prev = link;
}
/** Unlink @a link from whatever list it is on, then reset it to the
 *  detached (NULL/NULL) state. */
static inline void list_remove(link_t *link)
{
	link_t *before = link->prev;
	link_t *after = link->next;

	after->prev = before;
	before->next = after;
	link_initialize(link);
}
/** True iff the list headed by @a head contains no members
 *  (the head sentinel points back at itself). */
static inline bool list_empty(link_t *head)
{
	return head->next == head;
}
/** Insert @a link at the front of the list headed by @a head
 *  (i.e. immediately after the head sentinel). */
static inline void list_prepend(link_t *link, link_t *head)
{
	link_t *first = head->next;

	link->prev = head;
	link->next = first;
	first->prev = link;
	head->next = link;
}
/* ==== kernel/branches/kolibri_pe/include/mm.h (new file, 59 lines) ==== */
/** Per-physical-frame bookkeeping record (one entry per frame in a zone's
 *  frames array). */
typedef struct
{
	link_t buddy_link;	/**< link to the next free block inside one order */
	count_t refcount;	/**< tracking of shared frames */
	u32_t buddy_order;	/**< buddy system block order */
	void *parent;		/**< If allocated by slab, this points there */
} frame_t;
/** A contiguous region of physical memory managed by the buddy allocator. */
typedef struct {
	SPINLOCK_DECLARE(lock);	/**< this lock protects everything below */
	pfn_t base;		/**< frame_no of the first frame in the frames array */
	count_t count;		/**< Size of zone */
	frame_t *frames;	/**< array of frame_t structures in this zone */
	count_t free_count;	/**< number of free frame_t structures */
	count_t busy_count;	/**< number of busy frame_t structures */
	u32_t max_order;	/**< highest buddy order in use in this zone */
	link_t order[21];	/**< per-order free lists; 21 presumably bounds
				     max_order to 0..20 — TODO confirm against
				     the allocator */
	int flags;		/**< zone attribute flags; semantics defined by
				     the allocator implementation */
} zone_t;
# define PA2KA(x) (((u32_t) (x)) + OS_BASE) |
# define KA2PA(x) (((u32_t) (x)) - OS_BASE) |
#define PAGE_SIZE 4096 |
#define FRAME_WIDTH 12 |
#define BUDDY_SYSTEM_INNER_BLOCK 0xff |
/** Number of whole page frames needed to hold @a size bytes; 0 for 0. */
static inline count_t SIZE2FRAMES(size_t size)
{
	if (size == 0)
		return 0;
	/* Overflow-safe round-up: equivalent to ceil(size / PAGE_SIZE). */
	return (count_t) (((size - 1) >> FRAME_WIDTH) + 1);
}
/** Physical byte address of the first byte of frame number @a frame. */
static inline addr_t PFN2ADDR(pfn_t frame)
{
	return (addr_t) (frame << FRAME_WIDTH);
}
/** Number of the physical frame containing address @a addr.
 *  (Stray ';' after the closing brace removed — it formed an empty
 *  file-scope declaration, rejected under -pedantic.) */
static inline pfn_t ADDR2PFN(addr_t addr)
{
	return (pfn_t) (addr >> FRAME_WIDTH);
}
void init_mm(); |
pfn_t core_alloc(u32_t order); |
pfn_t alloc_page() __attribute__ ((deprecated)); |
pfn_t __stdcall alloc_pages(count_t count) __asm__ ("_alloc_pages") __attribute__ ((deprecated)); |
void core_free(pfn_t frame); |
void frame_free(pfn_t frame); |
/* ==== kernel/branches/kolibri_pe/include/spinlock.h (new file, 65 lines) ==== */
#include <atomic.h> |
#ifdef USE_SMP |
/** SMP spinlock: the wrapped atomic word is 0 when free, non-zero when
 *  held (see atomic_lock_arch() / spinlock_unlock()). */
typedef struct
{
	atomic_t val;
} spinlock_t;
/*
 * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks,
 * where the lock gets initialized in run time.
 */
#define SPINLOCK_DECLARE(slname) spinlock_t slname

/*
 * SPINLOCK_INITIALIZE is to be used for statically allocated spinlocks.
 * It declares and initializes the lock (to the unlocked state).
 */
#define SPINLOCK_INITIALIZE(slname) \
	spinlock_t slname = { \
		.val = { 0 } \
	}

extern void spinlock_initialize(spinlock_t *sl);
/* Single acquisition attempt without spinning; see the implementation for
 * the exact return convention. */
extern int spinlock_trylock(spinlock_t *sl);

/* Acquire: spin (pause-loop + xchg, see atomic.h) until the lock is ours. */
#define spinlock_lock(x) atomic_lock_arch(&(x)->val)
/** Unlock spinlock
 *
 * Unlock spinlock. The ASSERT catches a release of a lock that is not
 * actually held (debug builds only); the store of 0 publishes the lock as
 * free to other CPUs.
 *
 * @param sl Pointer to spinlock_t structure.
 */
static inline void spinlock_unlock(spinlock_t *sl)
{
	ASSERT(atomic_get(&sl->val) != 0);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	// CS_LEAVE_BARRIER();

	atomic_set(&sl->val, 0);
	// preemption_enable();
}
#else |
/* On UP systems, spinlocks are effectively left out. */
#define SPINLOCK_DECLARE(name)
#define SPINLOCK_EXTERN(name)
#define SPINLOCK_INITIALIZE(name)
#define spinlock_initialize(x)
#define spinlock_lock(x)
/* Must expand to a "success" value: an empty expansion would make
 * `if (spinlock_trylock(x))` a syntax error on UP builds. */
#define spinlock_trylock(x) 1
#define spinlock_unlock(x)
#endif |
/* ==== kernel/branches/kolibri_pe/include/types.h (new file, 24 lines) ==== */
/* Freestanding kernel build: libc headers are unavailable, so the basic
 * fixed-width and boolean types are declared here. */

/* Fully parenthesized: the original `(void*)0` mis-parses in expressions
 * such as `sizeof NULL` (read as `(sizeof(void *)) 0`). */
#define NULL ((void *) 0)

typedef unsigned char u8_t;
typedef unsigned short int u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;

typedef u32_t addr_t;		/* physical or kernel-virtual address */
typedef u32_t pfn_t;		/* physical frame number */
typedef u32_t count_t;		/* object/frame counts */
typedef u32_t size_t;		/* NOTE(review): clashes with the compiler's
				   <stddef.h> size_t if hosted headers are
				   ever included — confirm the tree stays
				   freestanding */
typedef u32_t index_t;
typedef u32_t eflags_t;		/* saved x86 EFLAGS register image */

typedef int bool;
#define true ((bool) 1)
#define false ((bool) 0)