Subversion Repositories Kolibri OS

Rev

Rev 6936 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4065 Serge 1
/*
2
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3
 *
4
 * (C) SGI 2006, Christoph Lameter
5
 * 	Cleaned up and restructured to ease the addition of alternative
6
 * 	implementations of SLAB allocators.
4103 Serge 7
 * (C) Linux Foundation 2008-2013
8
 *      Unified interface for all slab allocators
4065 Serge 9
 */
1964 serge 10
 
4065 Serge 11
#ifndef _LINUX_SLAB_H
12
#define	_LINUX_SLAB_H
13
 
5270 serge 14
/* NOTE(review): the include targets were lost when this page was extracted
 * (angle-bracket contents stripped); restored from the upstream Linux
 * slab.h of this era — verify against the repository copy. */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
17
 
18
 
19
/*
20
 * Flags to pass to kmem_cache_create().
6082 serge 21
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
5270 serge 22
 */
7143 serge 23
#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
5270 serge 24
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
25
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
26
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
27
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
28
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
29
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
30
/*
31
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32
 *
33
 * This delays freeing the SLAB page by a grace period, it does _NOT_
34
 * delay object freeing. This means that if you do kmem_cache_free()
35
 * that memory location is free to be reused at any time. Thus it may
36
 * be possible to see another object there in the same RCU grace period.
37
 *
38
 * This feature only ensures the memory location backing the object
39
 * stays valid, the trick to using this is relying on an independent
40
 * object validation pass. Something like:
41
 *
42
 *  rcu_read_lock()
43
 * again:
44
 *  obj = lockless_lookup(key);
45
 *  if (obj) {
46
 *    if (!try_get_ref(obj)) // might fail for free objects
47
 *      goto again;
48
 *
49
 *    if (obj->key != key) { // not the object we expected
50
 *      put_ref(obj);
51
 *      goto again;
52
 *    }
53
 *  }
54
 *  rcu_read_unlock();
55
 *
56
 * This is useful if we need to approach a kernel structure obliquely,
57
 * from its address obtained without the usual locking. We can lock
58
 * the structure to stabilize it and check it's still at the given address,
59
 * only if we can be sure that the memory has not been meanwhile reused
60
 * for some other kind of object (which our subsystem's lock might corrupt).
61
 *
62
 * rcu_read_lock before reading the address, then rcu_read_unlock after
63
 * taking the spinlock within the structure expected at that address.
64
 */
65
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
66
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
67
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
68
 
69
/* Flag to prevent checks on free */
70
#ifdef CONFIG_DEBUG_OBJECTS
71
# define SLAB_DEBUG_OBJECTS	0x00400000UL
72
#else
73
# define SLAB_DEBUG_OBJECTS	0x00000000UL
74
#endif
75
 
76
#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
77
 
78
/* Don't track use of uninitialized memory */
79
#ifdef CONFIG_KMEMCHECK
80
# define SLAB_NOTRACK		0x01000000UL
81
#else
82
# define SLAB_NOTRACK		0x00000000UL
83
#endif
84
#ifdef CONFIG_FAILSLAB
85
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
86
#else
87
# define SLAB_FAILSLAB		0x00000000UL
88
#endif
6936 serge 89
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
90
# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
91
#else
92
# define SLAB_ACCOUNT		0x00000000UL
93
#endif
5270 serge 94
 
7143 serge 95
#ifdef CONFIG_KASAN
96
#define SLAB_KASAN		0x08000000UL
97
#else
98
#define SLAB_KASAN		0x00000000UL
99
#endif
100
 
5270 serge 101
/* The following flags affect the page allocator grouping pages by mobility */
102
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
103
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
104
/*
105
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
106
 *
107
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
108
 *
109
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
110
 * Both make kfree a no-op.
111
 */
112
#define ZERO_SIZE_PTR ((void *)16)
113
 
114
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
115
				(unsigned long)ZERO_SIZE_PTR)
116
 
117
void __init kmem_cache_init(void);
6082 serge 118
bool slab_is_available(void);
119
 
120
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
121
			unsigned long,
122
			void (*)(void *));
5270 serge 123
void kmem_cache_destroy(struct kmem_cache *);
124
int kmem_cache_shrink(struct kmem_cache *);
125
void kmem_cache_free(struct kmem_cache *, void *);
126
 
6936 serge 127
/*
 * krealloc - resize an allocation obtained from kmalloc/kzalloc.
 * @p: pointer to resize (may be NULL)
 * @new_size: requested size in bytes
 * @flags: allocation flags (unused by this port)
 *
 * Thin wrapper over the compiler's realloc builtin; the const
 * qualifier is cast away because realloc takes a mutable pointer.
 */
static inline void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
    void *mem = (void *)p;
    return __builtin_realloc(mem, new_size);
}
131
 
6936 serge 132
/*
 * kfree - release memory previously allocated with kmalloc/kzalloc.
 * @p: pointer to free; NULL is a safe no-op (free(NULL) semantics).
 */
static inline void kfree(const void *p)
{
    void *mem = (void *)p;  /* drop const for the allocator call */
    __builtin_free(mem);
}
136
/*
 * kmalloc - allocate memory.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (ignored by this port).
 *
 * NOTE(review): unlike upstream kmalloc, this port zeroes the
 * allocation (identical to kzalloc) — callers may depend on that,
 * so the memset is kept.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
    void *ret = __builtin_malloc(size);
    /* Only zero on success; memset(NULL, 0, size) is undefined behavior. */
    if (ret)
        memset(ret, 0, size);
    return ret;
}
142
 
143
/**
144
 * kzalloc - allocate memory. The memory is set to zero.
145
 * @size: how many bytes of memory are required.
146
 * @flags: the type of memory to allocate (see kmalloc).
147
 */
148
static inline void *kzalloc(size_t size, gfp_t flags)
149
{
150
    void *ret = __builtin_malloc(size);
151
    memset(ret, 0, size);
152
    return ret;
153
}
154
 
155
/*
 * kcalloc - allocate zeroed memory for an array of @n elements of @size bytes.
 * @n: number of elements
 * @size: size of each element
 * @flags: allocation flags (ignored by this port)
 *
 * Returns NULL on multiplication overflow, matching calloc() semantics;
 * otherwise behaves like kzalloc(n * size).
 */
static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
    /* Reject n * size overflow instead of silently wrapping. */
    if (size != 0 && n > (size_t)-1 / size)
        return NULL;
    return kzalloc(n * size, 0);
}
159
 
160
/*
 * kmalloc_array - allocate memory for an array of @n elements of @size bytes.
 * @n: number of elements
 * @size: size of each element
 * @flags: allocation flags (passed through to kmalloc)
 *
 * The overflow guard that upstream performs was commented out here;
 * re-enabled in a portable form ((size_t)-1 avoids needing SIZE_MAX)
 * so an overflowing n * size returns NULL rather than a short buffer.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
    if (size != 0 && n > (size_t)-1 / size)
        return NULL;
    return kmalloc(n * size, flags);
}
166
 
4065 Serge 167
#endif	/* _LINUX_SLAB_H */