/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/types.h>	/* size_t, uint32_t, gfp_t */
#include <linux/gfp.h>		/* allocation flag values */
#include <linux/string.h>	/* memset() */

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
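
/*
 * Illustrative sketch (not part of this header): a cache whose backing
 * pages are freed under RCU. kmem_cache_create() and kmem_cache_alloc()
 * are assumed to be declared elsewhere; struct conn is hypothetical.
 *
 *  struct conn {
 *      unsigned long key;
 *      atomic_t      refcnt;
 *  };
 *
 *  struct kmem_cache *conn_cache =
 *      kmem_cache_create("conn", sizeof(struct conn), 0,
 *                        SLAB_DESTROY_BY_RCU, NULL);
 *
 * Readers may then walk to a struct conn under rcu_read_lock(), but must
 * revalidate it as in the pattern above, because a freed slot can hold a
 * different conn within the same grace period.
 */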
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can;
 * both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
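
/*
 * Upstream-convention example (note that in this port kmalloc() wraps the
 * system allocator, so a zero-size request may not return ZERO_SIZE_PTR):
 *
 *  void *p = kmalloc(n, GFP_KERNEL);
 *  if (ZERO_OR_NULL_PTR(p))
 *      return;        // failed, or n was zero: nothing usable allocated
 *  ...
 *  kfree(p);          // always safe: no-op for NULL and ZERO_SIZE_PTR
 */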

void __init kmem_cache_init(void);
int slab_is_available(void);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
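
/*
 * Typical cache lifecycle, as a sketch: kmem_cache_create() and
 * kmem_cache_alloc() are assumed to be declared elsewhere in this tree,
 * and struct my_obj is hypothetical.
 *
 *  struct kmem_cache *c =
 *      kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *                        SLAB_HWCACHE_ALIGN, NULL);
 *  struct my_obj *o = kmem_cache_alloc(c, GFP_KERNEL);
 *  ...
 *  kmem_cache_free(c, o);
 *  kmem_cache_shrink(c);      // optional: give empty slabs back
 *  kmem_cache_destroy(c);     // all objects must have been freed
 */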

/*
 * In this KolibriOS port the kmalloc() family wraps the compiler's
 * builtin (system) allocator; the gfp_t flags are accepted only for
 * source compatibility with Linux code and are otherwise ignored.
 */
static inline void *krealloc(void *p, size_t new_size, gfp_t flags)
{
	return __builtin_realloc(p, new_size);
}

static inline void kfree(void *p)
{
	__builtin_free(p);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	return __builtin_malloc(size);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	void *ret = __builtin_malloc(size);

	if (ret)
		memset(ret, 0, size);
	return ret;
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)	/* n * size would overflow */
		return NULL;
	return kzalloc(n * size, flags);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)	/* n * size would overflow */
		return NULL;
	return kmalloc(n * size, flags);
}
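
/*
 * Example use of the wrappers above (struct item is hypothetical; the
 * gfp flags are accepted but ignored in this port):
 *
 *  struct item *v = kcalloc(16, sizeof(*v), GFP_KERNEL);  // zeroed array
 *  if (!v)
 *      return;
 *  struct item *tmp = krealloc(v, 32 * sizeof(*v), GFP_KERNEL);
 *  if (tmp)           // on failure the old block stays valid
 *      v = tmp;
 *  kfree(v);
 */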

#endif	/* _LINUX_SLAB_H */