/drivers/ddk/Makefile |
---|
1,5 → 1,4 |
CC = gcc |
AS = as |
6,8 → 5,14 |
DRV_TOPDIR = $(CURDIR)/.. |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm |
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI |
INCLUDES = -I$(DRV_INCLUDES) \ |
-I$(DRV_INCLUDES)/asm \ |
-I$(DRV_INCLUDES)/uapi |
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU |
DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \ |
-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 |
25,6 → 30,7 |
io/write.c \ |
linux/bitmap.c \ |
linux/dmi.c \ |
linux/find_next_bit.c \ |
linux/idr.c \ |
linux/interval_tree.c \ |
linux/firmware.c \ |
/drivers/ddk/debug/dbglog.c |
---|
1,6 → 1,6 |
#include <ddk.h> |
#include <mutex.h> |
#include <linux/mutex.h> |
#include <syscall.h> |
#pragma pack(push, 1) |
/drivers/ddk/linux/bitmap.c |
---|
132,7 → 132,9 |
lower = src[off + k]; |
if (left && off + k == lim - 1) |
lower &= mask; |
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; |
dst[k] = lower >> rem; |
if (rem) |
dst[k] |= upper << (BITS_PER_LONG - rem); |
if (left && k == lim - 1) |
dst[k] &= mask; |
} |
173,7 → 175,9 |
upper = src[k]; |
if (left && k == lim - 1) |
upper &= (1UL << left) - 1; |
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; |
dst[k + off] = upper << rem; |
if (rem) |
dst[k + off] |= lower >> (BITS_PER_LONG - rem); |
if (left && k + off == lim - 1) |
dst[k + off] &= (1UL << left) - 1; |
} |
323,23 → 327,25 |
} |
EXPORT_SYMBOL(bitmap_clear); |
/* |
* bitmap_find_next_zero_area - find a contiguous aligned zero area |
/** |
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area |
* @map: The address to base the search on |
* @size: The bitmap size in bits |
* @start: The bitnumber to start searching at |
* @nr: The number of zeroed bits we're looking for |
* @align_mask: Alignment mask for zero area |
* @align_offset: Alignment offset for zero area. |
* |
* The @align_mask should be one less than a power of 2; the effect is that |
* the bit offset of all zero areas this function finds is multiples of that |
* power of 2. A @align_mask of 0 means no alignment is required. |
* the bit offset of all zero areas this function finds plus @align_offset |
* is multiple of that power of 2. |
*/ |
unsigned long bitmap_find_next_zero_area(unsigned long *map, |
unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask) |
unsigned long align_mask, |
unsigned long align_offset) |
{ |
unsigned long index, end, i; |
again: |
346,7 → 352,7 |
index = find_next_zero_bit(map, size, start); |
/* Align allocation */ |
index = __ALIGN_MASK(index, align_mask); |
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset; |
end = index + nr; |
if (end > size) |
358,7 → 364,7 |
} |
return index; |
} |
EXPORT_SYMBOL(bitmap_find_next_zero_area); |
EXPORT_SYMBOL(bitmap_find_next_zero_area_off); |
/* |
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers, |
599,7 → 605,7 |
* |
* Further lets say we use the following code, invoking |
* bitmap_fold() then bitmap_onto, as suggested above to |
* avoid the possitility of an empty @dst result: |
* avoid the possibility of an empty @dst result: |
* |
* unsigned long *tmp; // a temporary bitmap's bits |
* |
/drivers/ddk/linux/dmapool.c |
---|
24,9 → 24,11 |
#include <ddk.h> |
#include <linux/slab.h> |
#include <linux/errno.h> |
#include <linux/mutex.h> |
#include <pci.h> |
#include <linux/pci.h> |
#include <linux/gfp.h> |
#include <syscall.h> |
142,7 → 144,7 |
{ |
struct dma_page *page; |
page = malloc(sizeof(*page)); |
page = __builtin_malloc(sizeof(*page)); |
if (!page) |
return NULL; |
page->vaddr = (void*)KernelAlloc(pool->allocation); |
228,7 → 230,7 |
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
dma_addr_t *handle) |
{ |
u32_t efl; |
u32 efl; |
struct dma_page *page; |
size_t offset; |
void *retval; |
262,7 → 264,7 |
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
{ |
struct dma_page *page; |
u32_t efl; |
u32 efl; |
efl = safe_cli(); |
294,7 → 296,7 |
unsigned long flags; |
unsigned int offset; |
u32_t efl; |
u32 efl; |
page = pool_find_page(pool, dma); |
if (!page) { |
/drivers/ddk/linux/dmi.c |
---|
7,12 → 7,9 |
#include <linux/dmi.h> |
#include <syscall.h> |
#define pr_debug dbgprintf |
#define pr_info printf |
static void *dmi_alloc(unsigned len) |
{ |
return malloc(len); |
return __builtin_malloc(len); |
}; |
/* |
/drivers/ddk/linux/find_next_bit.c |
---|
0,0 → 1,285 |
/* find_next_bit.c: fallback find next bit implementation |
* |
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. |
* Written by David Howells (dhowells@redhat.com) |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; either version |
* 2 of the License, or (at your option) any later version. |
*/ |
#include <linux/bitops.h> |
#include <linux/export.h> |
#include <asm/types.h> |
#include <asm/byteorder.h> |
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
#ifndef find_next_bit |
/* |
 * find_next_bit - find the index of the next set bit at or after @offset. |
 * @addr:   base of the bitmap (array of unsigned long words) |
 * @size:   total number of valid bits in the bitmap |
 * @offset: bit number to start searching from |
 * |
 * Returns the index of the first set bit >= @offset, or @size when no |
 * set bit exists in [@offset, @size). |
 */ |
unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
/* Partial first word: clear the bits below @offset before testing. */ |
tmp = *(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
/* Scan whole words while at least one full word remains. */ |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
/* Partial last word: mask off the bits past the end of the bitmap. */ |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
} |
EXPORT_SYMBOL(find_next_bit); |
#endif |
#ifndef find_next_zero_bit |
/* |
 * find_next_zero_bit - find the index of the next clear bit at or after |
 * @offset.  Implementation stolen from Linus' asm-alpha/bitops.h. |
 * @addr:   base of the bitmap |
 * @size:   total number of valid bits in the bitmap |
 * @offset: bit number to start searching from |
 * |
 * Returns the index of the first zero bit >= @offset, or @size when |
 * every bit in [@offset, @size) is set. |
 */ |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
/* Partial first word: force the bits below @offset to 1 so they |
 * cannot be reported as "zero". */ |
tmp = *(p++); |
tmp |= ~0UL >> (BITS_PER_LONG - offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
/* Scan whole words; ~tmp is non-zero iff the word has a clear bit. */ |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
/* Partial last word: force the bits past the end to 1. */ |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + ffz(tmp); |
} |
EXPORT_SYMBOL(find_next_zero_bit); |
#endif |
#ifndef find_first_bit |
/* |
 * find_first_bit - find the index of the first set bit in a memory region. |
 * @addr: base of the bitmap |
 * @size: number of valid bits in the bitmap |
 * |
 * Returns the index of the first set bit, or @size if the bitmap is |
 * all zeroes. |
 */ |
unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
/* Scan whole words while at least one full word remains. */ |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
/* Partial last word: mask off the bits past the end of the bitmap. */ |
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found: |
return result + __ffs(tmp); |
} |
EXPORT_SYMBOL(find_first_bit); |
#endif |
#ifndef find_first_zero_bit |
/* |
 * find_first_zero_bit - find the index of the first cleared bit in a |
 * memory region. |
 * @addr: base of the bitmap |
 * @size: number of valid bits in the bitmap |
 * |
 * Returns the index of the first zero bit, or @size if every bit is set. |
 */ |
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
/* Scan whole words; ~tmp is non-zero iff the word has a clear bit. */ |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
/* Partial last word: force the bits past the end to 1. */ |
tmp = (*p) | (~0UL << size); |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found: |
return result + ffz(tmp); |
} |
EXPORT_SYMBOL(find_first_zero_bit); |
#endif |
#ifdef __BIG_ENDIAN |
/* include/linux/byteorder does not support "unsigned long" type */ |
/* Load the word at *@x with its bytes swapped (__swab64p/__swab32p |
 * picked by BITS_PER_LONG).  Only compiled on __BIG_ENDIAN hosts, where |
 * it is used to read little-endian bitmap words. */ |
static inline unsigned long ext2_swabp(const unsigned long * x) |
{ |
#if BITS_PER_LONG == 64 |
return (unsigned long) __swab64p((u64 *) x); |
#elif BITS_PER_LONG == 32 |
return (unsigned long) __swab32p((u32 *) x); |
#else |
#error BITS_PER_LONG not defined |
#endif |
} |
/* include/linux/byteorder doesn't support "unsigned long" type */ |
/* By-value counterpart of ext2_swabp(): return @y with its bytes |
 * swapped (__swab64/__swab32 picked by BITS_PER_LONG). */ |
static inline unsigned long ext2_swab(const unsigned long y) |
{ |
#if BITS_PER_LONG == 64 |
return (unsigned long) __swab64((u64) y); |
#elif BITS_PER_LONG == 32 |
return (unsigned long) __swab32((u32) y); |
#else |
#error BITS_PER_LONG not defined |
#endif |
} |
#ifndef find_next_zero_bit_le |
/* |
 * find_next_zero_bit_le - find the next clear bit in a little-endian |
 * bitmap, running on a big-endian host (this block is guarded by |
 * __BIG_ENDIAN). |
 * @addr:   base of the little-endian bitmap |
 * @size:   number of valid bits |
 * @offset: bit number to start searching from |
 * |
 * Partial first/last words are read byte-swapped via ext2_swabp(); the |
 * whole-word middle loop tests the raw word (all-ones is all-ones in |
 * either byte order) and only swaps at found_middle_swap. |
 * Returns the bit index, or @size if no zero bit was found. |
 */ |
unsigned long find_next_zero_bit_le(const void *addr, unsigned |
long size, unsigned long offset) |
{ |
const unsigned long *p = addr; |
unsigned long result = offset & ~(BITS_PER_LONG - 1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
p += BITOP_WORD(offset); |
size -= result; |
offset &= (BITS_PER_LONG - 1UL); |
if (offset) { |
/* Partial first word: force bits below @offset to 1. */ |
tmp = ext2_swabp(p++); |
tmp |= (~0UL >> (BITS_PER_LONG - offset)); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG - 1)) { |
if (~(tmp = *(p++))) |
goto found_middle_swap; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = ext2_swabp(p); |
found_first: |
/* Partial last word: force bits past the end to 1. */ |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. Skip ffz */ |
found_middle: |
return result + ffz(tmp); |
found_middle_swap: |
return result + ffz(ext2_swab(tmp)); |
} |
EXPORT_SYMBOL(find_next_zero_bit_le); |
#endif |
#ifndef find_next_bit_le |
/* |
 * find_next_bit_le - find the next set bit in a little-endian bitmap, |
 * running on a big-endian host (this block is guarded by __BIG_ENDIAN). |
 * @addr:   base of the little-endian bitmap |
 * @size:   number of valid bits |
 * @offset: bit number to start searching from |
 * |
 * Partial first/last words are read byte-swapped via ext2_swabp(); the |
 * whole-word middle loop tests the raw word (zero is zero in either |
 * byte order) and only swaps at found_middle_swap. |
 * Returns the bit index, or @size if no set bit was found. |
 */ |
unsigned long find_next_bit_le(const void *addr, unsigned |
long size, unsigned long offset) |
{ |
const unsigned long *p = addr; |
unsigned long result = offset & ~(BITS_PER_LONG - 1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
p += BITOP_WORD(offset); |
size -= result; |
offset &= (BITS_PER_LONG - 1UL); |
if (offset) { |
/* Partial first word: clear bits below @offset before testing. */ |
tmp = ext2_swabp(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG - 1)) { |
tmp = *(p++); |
if (tmp) |
goto found_middle_swap; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = ext2_swabp(p); |
found_first: |
/* Partial last word: mask off bits past the end of the bitmap. */ |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
found_middle_swap: |
return result + __ffs(ext2_swab(tmp)); |
} |
EXPORT_SYMBOL(find_next_bit_le); |
#endif |
#endif /* __BIG_ENDIAN */ |
/drivers/ddk/linux/firmware.c |
---|
1,6 → 1,8 |
#include <linux/kernel.h> |
#include <linux/slab.h> |
#include <linux/byteorder/little_endian.h> |
#include <linux/gfp.h> |
#include <linux/errno.h> |
#include <linux/firmware.h> |
/drivers/ddk/linux/idr.c |
---|
20,20 → 20,16 |
* that id to this code and it returns your pointer. |
*/ |
#include <linux/kernel.h> |
#ifndef TEST // to test in user space... |
#include <linux/slab.h> |
#include <linux/export.h> |
#endif |
#include <linux/err.h> |
#include <linux/string.h> |
#include <linux/bitops.h> |
#include <linux/idr.h> |
//#include <stdlib.h> |
#include <linux/spinlock.h> |
static inline void * __must_check ERR_PTR(long error) |
{ |
return (void *) error; |
} |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset); |
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) |
132,7 → 128,7 |
{ |
if (idr->hint == p) |
RCU_INIT_POINTER(idr->hint, NULL); |
idr_layer_rcu_free(&p->rcu_head); |
call_rcu(&p->rcu_head, idr_layer_rcu_free); |
} |
/* only called when idp->lock is held */ |
500,7 → 496,7 |
n = id & IDR_MASK; |
if (likely(p != NULL && test_bit(n, p->bitmap))) { |
__clear_bit(n, p->bitmap); |
rcu_assign_pointer(p->ary[n], NULL); |
RCU_INIT_POINTER(p->ary[n], NULL); |
to_free = NULL; |
while(*paa && ! --((**paa)->count)){ |
if (to_free) |
564,7 → 560,7 |
n = idp->layers * IDR_BITS; |
*paa = idp->top; |
rcu_assign_pointer(idp->top, NULL); |
RCU_INIT_POINTER(idp->top, NULL); |
max = idr_max(idp->layers); |
id = 0; |
599,7 → 595,7 |
* idr_destroy(). |
* |
* A typical clean-up sequence for objects stored in an idr tree will use |
* idr_for_each() to free all objects, if necessay, then idr_destroy() to |
* idr_for_each() to free all objects, if necessary, then idr_destroy() to |
* free up the id mappings and cached idr_layers. |
*/ |
void idr_destroy(struct idr *idp) |
1119,129 → 1115,3 |
} |
EXPORT_SYMBOL(ida_init); |
unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found: |
return result + __ffs(tmp); |
} |
unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
} |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp |= ~0UL >> (BITS_PER_LONG - offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + ffz(tmp); |
} |
unsigned int hweight32(unsigned int w) |
{ |
unsigned int res = w - ((w >> 1) & 0x55555555); |
res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
res = (res + (res >> 4)) & 0x0F0F0F0F; |
res = res + (res >> 8); |
return (res + (res >> 16)) & 0x000000FF; |
} |
unsigned long hweight64(__u64 w) |
{ |
#if BITS_PER_LONG == 32 |
return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); |
#elif BITS_PER_LONG == 64 |
__u64 res = w - ((w >> 1) & 0x5555555555555555ul); |
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); |
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; |
res = res + (res >> 8); |
res = res + (res >> 16); |
return (res + (res >> 32)) & 0x00000000000000FFul; |
#endif |
} |
/drivers/ddk/linux/list_sort.c |
---|
1,101 → 1,145 |
#define pr_fmt(fmt) "list_sort_test: " fmt |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/list_sort.h> |
#include <linux/slab.h> |
#include <linux/list.h> |
#define MAX_LIST_LENGTH_BITS 20 |
/* |
 * Returns a list organized in an intermediate format suited |
 * to chaining of merge() calls: null-terminated, no reserved or |
 * sentinel head node, "prev" links not maintained. |
 * |
 * @priv is passed through to @cmp unchanged; @a and @b are two |
 * already-sorted null-terminated sublists in the same intermediate |
 * format.  Ties go to @a, which is what keeps the sort stable. |
 */ |
static struct list_head *merge(void *priv, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b), |
struct list_head *a, struct list_head *b) |
{ |
/* head is a stack-local dummy node; the real result is head.next. */ |
struct list_head head, *tail = &head; |
while (a && b) { |
/* if equal, take 'a' -- important for sort stability */ |
if ((*cmp)(priv, a, b) <= 0) { |
tail->next = a; |
a = a->next; |
} else { |
tail->next = b; |
b = b->next; |
} |
tail = tail->next; |
} |
/* Append whichever input still has elements (at most one does). */ |
tail->next = a?:b; |
return head.next; |
} |
/* |
 * Combine final list merge with restoration of standard doubly-linked |
 * list structure. This approach duplicates code from merge(), but |
 * runs faster than the tidier alternatives of either a separate final |
 * prev-link restoration pass, or maintaining the prev links |
 * throughout. |
 * |
 * @head is the original sentinel head node; @a and @b are the last two |
 * null-terminated sublists.  On return the list hanging off @head is a |
 * fully restored circular doubly-linked list. |
 */ |
static void merge_and_restore_back_links(void *priv, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b), |
struct list_head *head, |
struct list_head *a, struct list_head *b) |
{ |
struct list_head *tail = head; |
/* u8 so the counter wraps every 256 iterations (see loop below). */ |
u8 count = 0; |
while (a && b) { |
/* if equal, take 'a' -- important for sort stability */ |
if ((*cmp)(priv, a, b) <= 0) { |
tail->next = a; |
a->prev = tail; |
a = a->next; |
} else { |
tail->next = b; |
b->prev = tail; |
b = b->next; |
} |
tail = tail->next; |
} |
tail->next = a ? : b; |
do { |
/* |
 * In worst cases this loop may run many iterations. |
 * Continue callbacks to the client even though no |
 * element comparison is needed, so the client's cmp() |
 * routine can invoke cond_resched() periodically. |
 */ |
if (unlikely(!(++count))) |
(*cmp)(priv, tail->next, tail->next); |
tail->next->prev = tail; |
tail = tail->next; |
} while (tail->next); |
/* Close the circle back through the sentinel head. */ |
tail->next = head; |
head->prev = tail; |
} |
/** |
* list_sort - sort a list. |
* @priv: private data, passed to @cmp |
* list_sort - sort a list |
* @priv: private data, opaque to list_sort(), passed to @cmp |
* @head: the list to sort |
* @cmp: the elements comparison function |
* |
* This function has been implemented by Mark J Roberts <mjr@znex.org>. It |
* implements "merge sort" which has O(nlog(n)) complexity. The list is sorted |
* in ascending order. |
* This function implements "merge sort", which has O(nlog(n)) |
* complexity. |
* |
* The comparison function @cmp is supposed to return a negative value if @a is |
* less than @b, and a positive value if @a is greater than @b. If @a and @b |
* are equivalent, then it does not matter what this function returns. |
* The comparison function @cmp must return a negative value if @a |
* should sort before @b, and a positive value if @a should sort after |
* @b. If @a and @b are equivalent, and their original relative |
* ordering is to be preserved, @cmp must return 0. |
*/ |
void list_sort(void *priv, struct list_head *head, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b)) |
{ |
struct list_head *p, *q, *e, *list, *tail, *oldhead; |
int insize, nmerges, psize, qsize, i; |
struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists |
-- last slot is a sentinel */ |
int lev; /* index into part[] */ |
int max_lev = 0; |
struct list_head *list; |
if (list_empty(head)) |
return; |
memset(part, 0, sizeof(part)); |
head->prev->next = NULL; |
list = head->next; |
list_del(head); |
insize = 1; |
for (;;) { |
p = oldhead = list; |
list = tail = NULL; |
nmerges = 0; |
while (p) { |
nmerges++; |
q = p; |
psize = 0; |
for (i = 0; i < insize; i++) { |
psize++; |
q = q->next == oldhead ? NULL : q->next; |
if (!q) |
break; |
} |
while (list) { |
struct list_head *cur = list; |
list = list->next; |
cur->next = NULL; |
qsize = insize; |
while (psize > 0 || (qsize > 0 && q)) { |
if (!psize) { |
e = q; |
q = q->next; |
qsize--; |
if (q == oldhead) |
q = NULL; |
} else if (!qsize || !q) { |
e = p; |
p = p->next; |
psize--; |
if (p == oldhead) |
p = NULL; |
} else if (cmp(priv, p, q) <= 0) { |
e = p; |
p = p->next; |
psize--; |
if (p == oldhead) |
p = NULL; |
} else { |
e = q; |
q = q->next; |
qsize--; |
if (q == oldhead) |
q = NULL; |
for (lev = 0; part[lev]; lev++) { |
cur = merge(priv, cmp, part[lev], cur); |
part[lev] = NULL; |
} |
if (tail) |
tail->next = e; |
else |
list = e; |
e->prev = tail; |
tail = e; |
if (lev > max_lev) { |
if (unlikely(lev >= ARRAY_SIZE(part)-1)) { |
printk_once(KERN_DEBUG "list too long for efficiency\n"); |
lev--; |
} |
p = q; |
max_lev = lev; |
} |
part[lev] = cur; |
} |
tail->next = list; |
list->prev = tail; |
for (lev = 0; lev < max_lev; lev++) |
if (part[lev]) |
list = merge(priv, cmp, part[lev], list); |
if (nmerges <= 1) |
break; |
insize *= 2; |
merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); |
} |
head->next = list; |
head->prev = list->prev; |
list->prev->next = head; |
list->prev = head; |
} |
EXPORT_SYMBOL(list_sort); |
/drivers/ddk/linux/rbtree.c |
---|
101,7 → 101,7 |
* / \ / \ |
* p u --> P U |
* / / |
* n N |
* n n |
* |
* However, since g's parent might be red, and |
* 4) does not allow this, we need to recurse |
/drivers/ddk/linux/scatterlist.c |
---|
7,6 → 7,7 |
* Version 2. See the file COPYING for more details. |
*/ |
#include <linux/export.h> |
#include <linux/slab.h> |
#include <linux/scatterlist.h> |
/** |
70,7 → 71,7 |
**/ |
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) |
{ |
#ifndef ARCH_HAS_SG_CHAIN |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
struct scatterlist *ret = &sgl[nents - 1]; |
#else |
struct scatterlist *sg, *ret = NULL; |
182,10 → 183,10 |
} |
table->orig_nents -= sg_size; |
if (!skip_first_chunk) { |
if (skip_first_chunk) |
skip_first_chunk = false; |
else |
free_fn(sgl, alloc_size); |
skip_first_chunk = false; |
} |
sgl = next; |
} |
234,7 → 235,7 |
if (nents == 0) |
return -EINVAL; |
#ifndef ARCH_HAS_SG_CHAIN |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
if (WARN_ON_ONCE(nents > max_ents)) |
return -EINVAL; |
#endif |
/drivers/ddk/linux/string.c |
---|
27,7 → 27,7 |
#ifndef __HAVE_ARCH_STRLCPY |
/** |
* strlcpy - Copy a %NUL terminated string into a sized buffer |
* strlcpy - Copy a C-string into a sized buffer |
* @dest: Where to copy the string to |
* @src: Where to copy the string from |
* @size: size of destination buffer |
/drivers/ddk/linux/time.c |
---|
1,4 → 1,4 |
#include <jiffies.h> |
#include <linux/jiffies.h> |
131,6 → 131,7 |
>> MSEC_TO_HZ_SHR32; |
#endif |
} |
EXPORT_SYMBOL(msecs_to_jiffies); |
unsigned long usecs_to_jiffies(const unsigned int u) |
{ |
145,12 → 146,27 |
>> USEC_TO_HZ_SHR32; |
#endif |
} |
EXPORT_SYMBOL(usecs_to_jiffies); |
unsigned long |
timespec_to_jiffies(const struct timespec *value) |
/* |
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note |
* that a remainder subtract here would not do the right thing as the |
* resolution values don't fall on second boundaries. I.e. the line: |
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. |
* Note that due to the small error in the multiplier here, this |
* rounding is incorrect for sufficiently large values of tv_nsec, but |
* well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're |
* OK. |
* |
* Rather, we just shift the bits off the right. |
* |
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec |
* value to a scaled second value. |
*/ |
static unsigned long |
__timespec_to_jiffies(unsigned long sec, long nsec) |
{ |
unsigned long sec = value->tv_sec; |
long nsec = value->tv_nsec + TICK_NSEC - 1; |
nsec = nsec + TICK_NSEC - 1; |
if (sec >= MAX_SEC_IN_JIFFIES){ |
sec = MAX_SEC_IN_JIFFIES; |
162,6 → 178,28 |
} |
unsigned long |
timespec_to_jiffies(const struct timespec *value) |
{ |
return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); |
} |
EXPORT_SYMBOL(timespec_to_jiffies); |
void |
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) |
{ |
/* |
* Convert jiffies to nanoseconds and separate with |
* one divide. |
*/ |
u32 rem; |
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, |
NSEC_PER_SEC, &rem); |
value->tv_nsec = rem; |
} |
EXPORT_SYMBOL(jiffies_to_timespec); |
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) |
{ |
u64 quotient; |
/drivers/ddk/linux/workqueue.c |
---|
1,5 → 1,39 |
/* |
* kernel/workqueue.c - generic async execution with shared worker pool |
* |
* Copyright (C) 2002 Ingo Molnar |
* |
* Derived from the taskqueue/keventd code by: |
* David Woodhouse <dwmw2@infradead.org> |
* Andrew Morton |
* Kai Petzke <wpp@marie.physik.tu-berlin.de> |
* Theodore Ts'o <tytso@mit.edu> |
* |
* Made to use alloc_percpu by Christoph Lameter. |
* |
* Copyright (C) 2010 SUSE Linux Products GmbH |
* Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
* |
* This is the generic async execution mechanism. Work items are |
* executed in process context. The worker pool is shared and |
* automatically managed. There are two worker pools for each CPU (one for |
* normal work items and the other for high priority ones) and some extra |
* pools for workqueues which are not bound to any specific CPU - the |
* number of these backing pools is dynamic. |
* |
* Please read Documentation/workqueue.txt for details. |
*/ |
#include <linux/export.h> |
#include <linux/kernel.h> |
#include <linux/sched.h> |
#include <linux/completion.h> |
#include <linux/workqueue.h> |
#include <linux/slab.h> |
#include <linux/lockdep.h> |
#include <linux/idr.h> |
#include <ddk.h> |
extern int driver_wq_state; |
/drivers/ddk/malloc/malloc.c |
---|
522,7 → 522,7 |
*/ |
#include <ddk.h> |
#include <mutex.h> |
#include <linux/mutex.h> |
#include <syscall.h> |
/* Version identifier to allow people to support multiple versions */ |
/drivers/ddk/stdio/vsprintf.c |
---|
22,11 → 22,12 |
#include <linux/string.h> |
#include <linux/ctype.h> |
#include <linux/kernel.h> |
#include <errno-base.h> |
#include <linux/ioport.h> |
#include <linux/export.h> |
#include <asm/div64.h> |
#include <asm/page.h> /* for PAGE_SIZE */ |
static inline u64 div_u64(u64 dividend, u32 divisor) |
41,10 → 42,6 |
return div_s64_rem(dividend, divisor, &remainder); |
} |
struct va_format { |
const char *fmt; |
va_list *va; |
}; |
#define ZERO_SIZE_PTR ((void *)16) |
62,14 → 59,7 |
/* Works only for digits and letters, but small and fast */ |
#define TOLOWER(x) ((x) | 0x20) |
static inline char *hex_byte_pack(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_hi(byte); |
*buf++ = hex_asc_lo(byte); |
return buf; |
} |
char *skip_spaces(const char *str) |
{ |
while (isspace(*str)) |
1297,6 → 1287,7 |
* %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address |
* %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper |
* case. |
* %*pE[achnops] print an escaped buffer |
* %*ph[CDN] a variable-length hex string with a separator (supports up to 64 |
* bytes of the input) |
* %n is ignored |