Subversion Repositories: KolibriOS

Diff: Rev 5270 → Rev 6295
@@ -20,15 +20,20 @@
  * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
  * list of free blocks within the page.  Used blocks aren't tracked, but we
  * keep a count of how many are currently allocated from each page.
  */
 
+#include <...>
+#include <...>
+#include <...>
+#include <...>
+#include <...>
 
 #include <...>
+#include <...>
 #include <...>
-#include <...>
-#include <...>
+
 #include <...>
 #include <...>
 #include <...>
 
 
@@ -35,6 +40,8 @@
 struct dma_pool {       /* the pool */
     struct list_head page_list;
-    struct mutex lock;
+    spinlock_t lock;
     size_t size;
+    struct device *dev;
     size_t allocation;
     size_t boundary;
+    char name[32];
@@ -47,4 +54,4 @@
     dma_addr_t dma;
     unsigned int in_use;
     unsigned int offset;
 };
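
The struct definitions and the file's header comment carry the whole design: a pool is a list of pages, and within each page the free blocks form an unsorted singly linked list threaded through the blocks themselves, each free block's first sizeof(int) bytes holding the page-relative offset of the next free block. A minimal standalone sketch of that encoding (plain userspace C, illustrative names, ignoring the 'boundary' splitting that pool_initialise_page() also performs):

    #include <stdio.h>
    #include <stdlib.h>

    #define ALLOCATION 4096   /* one pool page */
    #define BLOCK        64   /* pool->size    */

    int main(void)
    {
        unsigned char *vaddr = malloc(ALLOCATION);
        unsigned int head = 0;            /* page->offset: first free block */
        unsigned int offset = 0;

        /* pool_initialise_page(): each free block stores the offset of
         * the next one in its own first bytes; no extra metadata needed. */
        do {
            unsigned int next = offset + BLOCK;
            *(int *)(vaddr + offset) = next;
            offset = next;
        } while (offset < ALLOCATION);

        /* dma_pool_alloc() fast path: pop the head of the chain. */
        unsigned int block = head;
        head = *(int *)(vaddr + block);
        printf("allocated offset %u, next free %u\n", block, head);

        /* dma_pool_free(): push the block back as the new head. */
        *(int *)(vaddr + block) = head;
        head = block;

        free(vaddr);
        return 0;
    }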
@@ -51,5 +58,7 @@
-
 
 static DEFINE_MUTEX(pools_lock);
+static DEFINE_MUTEX(pools_reg_lock);
+
 
 
@@ -77,35 +86,33 @@
 struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                  size_t size, size_t align, size_t boundary)
 {
     struct dma_pool *retval;
     size_t allocation;
+    bool empty = false;
 
-    if (align == 0) {
+    if (align == 0)
         align = 1;
-    } else if (align & (align - 1)) {
+    else if (align & (align - 1))
         return NULL;
-    }
 
-    if (size == 0) {
+    if (size == 0)
         return NULL;
-    } else if (size < 4) {
+    else if (size < 4)
         size = 4;
-    }
 
     if ((size % align) != 0)
         size = ALIGN(size, align);
 
     allocation = max_t(size_t, size, PAGE_SIZE);
 
     allocation = (allocation+0x7FFF) & ~0x7FFF;
 
-    if (!boundary) {
+    if (!boundary)
         boundary = allocation;
-    } else if ((boundary < size) || (boundary & (boundary - 1))) {
+    else if ((boundary < size) || (boundary & (boundary - 1)))
         return NULL;
-    }
 
     retval = kmalloc(sizeof(*retval), GFP_KERNEL);
 
     if (!retval)
         return retval;
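
dma_pool_create()'s argument checks above define the contract: align must be zero or a power of two, size is rounded up to at least 4 bytes and to a multiple of align, and boundary must be zero or a power of two no smaller than size. A sketch of the usual calling sequence from a driver's point of view (the device pointer and sizes are illustrative, not taken from this diff):

    /* Sketch: one create/alloc/free/destroy round trip through the API
     * patched above. 'my_dev' is a placeholder for the caller's device. */
    static int dma_pool_example(struct device *my_dev)
    {
        struct dma_pool *pool;
        dma_addr_t handle;
        void *vaddr;

        /* 64-byte blocks, 16-byte aligned, no boundary constraint (0). */
        pool = dma_pool_create("example", my_dev, 64, 16, 0);
        if (!pool)
            return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
        if (vaddr)                          /* %NULL on failure */
            dma_pool_free(pool, vaddr, handle);

        dma_pool_destroy(pool);             /* all blocks must be freed first */
        return 0;
    }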
@@ -137,64 +146,74 @@
         *(int *)(page->vaddr + offset) = next;
         offset = next;
     } while (offset < pool->allocation);
 }
-
 
-static struct dma_page *pool_alloc_page(struct dma_pool *pool)
+static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 {
     struct dma_page *page;
 
-    page = __builtin_malloc(sizeof(*page));
+    page = kmalloc(sizeof(*page), mem_flags);
     if (!page)
         return NULL;
     page->vaddr = (void*)KernelAlloc(pool->allocation);
 
     dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr);
 
-    if (page->vaddr)
-    {
+    if (page->vaddr) {
+#ifdef DMAPOOL_DEBUG
+        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+
         page->dma = GetPgAddr(page->vaddr);
 
         dbgprintf("dma 0x%0x\n", page->dma);
 
         pool_initialise_page(pool, page);
-        list_add(&page->page_list, &pool->page_list);
         page->in_use = 0;
         page->offset = 0;
     } else {
-        free(page);
+        kfree(page);
         page = NULL;
     }
     return page;
 }
 
-static inline int is_page_busy(struct dma_page *page)
+static inline bool is_page_busy(struct dma_page *page)
 {
     return page->in_use != 0;
 }
 
-
 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
     dma_addr_t dma = page->dma;
 
+#ifdef DMAPOOL_DEBUG
+    memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+
     KernelFree(page->vaddr);
     list_del(&page->page_list);
-    free(page);
+    kfree(page);
 }
 
-
 /**
  * dma_pool_destroy - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
  * Context: !in_interrupt()
  *
  * Caller guarantees that no more memory from the pool is in use,
  * and that nothing will try to use the pool after this call.
  */
 void dma_pool_destroy(struct dma_pool *pool)
 {
+    bool empty = false;
+
+    if (unlikely(!pool))
+        return;
+
+    mutex_lock(&pools_reg_lock);
     mutex_lock(&pools_lock);
     list_del(&pool->pools);
     mutex_unlock(&pools_lock);
 
+    mutex_unlock(&pools_reg_lock);
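
Much of what this revision adds here is the DMAPOOL_DEBUG instrumentation: a page is filled with POOL_POISON_FREED when it is created and again before it is returned to the system, so stale writes through dangling pointers become visible. In upstream Linux the poison values live in linux/poison.h (0xa7 for freed, 0xa9 for allocated); assuming this port keeps those values, the scan that dma_pool_alloc() performs below reduces to a sketch like this:

    #include <stddef.h>

    /* Sketch of the DMAPOOL_DEBUG poison scan, with the upstream
     * linux/poison.h values; this port may define them elsewhere. */
    #define POOL_POISON_FREED     0xa7   /* block sat on the free list */
    #define POOL_POISON_ALLOCATED 0xa9   /* block handed out to a user */

    static int block_was_scribbled(const unsigned char *data, size_t size)
    {
        size_t i;

        /* The first sizeof(int) bytes legitimately hold the free-list
         * link, so only the rest of the block must still be poisoned. */
        for (i = sizeof(int); i < size; i++)
            if (data[i] != POOL_POISON_FREED)
                return 1;    /* someone wrote to a free block */
        return 0;
    }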
@@ -213,9 +232,9 @@
             pool_free_page(pool, page);
     }
 
     kfree(pool);
 }
+EXPORT_SYMBOL(dma_pool_destroy);
 
-
 /**
  * dma_pool_alloc - get a block of consistent memory
@@ -228,53 +247,80 @@
  * If such a memory block can't be allocated, %NULL is returned.
  */
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
              dma_addr_t *handle)
 {
-    u32   efl;
+    unsigned long flags;
     struct  dma_page *page;
     size_t  offset;
     void   *retval;
 
-    efl = safe_cli();
- restart:
+
+    spin_lock_irqsave(&pool->lock, flags);
     list_for_each_entry(page, &pool->page_list, page_list) {
         if (page->offset < pool->allocation)
             goto ready;
     }
-    page = pool_alloc_page(pool);
-    if (!page)
-    {
-        retval = NULL;
-        goto done;
-    }
 
+    /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+    spin_unlock_irqrestore(&pool->lock, flags);
+
+    page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
+    if (!page)
+        return NULL;
+
+    spin_lock_irqsave(&pool->lock, flags);
+
+    list_add(&page->page_list, &pool->page_list);
  ready:
     page->in_use++;
     offset = page->offset;
     page->offset = *(int *)(page->vaddr + offset);
     retval = offset + page->vaddr;
     *handle = offset + page->dma;
- done:
-    safe_sti(efl);
+#ifdef DMAPOOL_DEBUG
+    {
+        int i;
+        u8 *data = retval;
+        /* page->offset is stored in first 4 bytes */
+        for (i = sizeof(page->offset); i < pool->size; i++) {
+            if (data[i] == POOL_POISON_FREED)
+                continue;
+            if (pool->dev)
+                dev_err(pool->dev,
+                    "dma_pool_alloc %s, %p (corrupted)\n",
+                    pool->name, retval);
+            else
+                pr_err("dma_pool_alloc %s, %p (corrupted)\n",
+                    pool->name, retval);
+
+            /*
+             * Dump the first 4 bytes even if they are not
+             * POOL_POISON_FREED
+             */
+            print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
+                    data, pool->size, 1);
+            break;
+        }
+    }
+    if (!(mem_flags & __GFP_ZERO))
+        memset(retval, POOL_POISON_ALLOCATED, pool->size);
+#endif
+    spin_unlock_irqrestore(&pool->lock, flags);
+
+    if (mem_flags & __GFP_ZERO)
+        memset(retval, 0, pool->size);
+
     return retval;
 }
+EXPORT_SYMBOL(dma_pool_alloc);
 
-
-
 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
     struct dma_page *page;
-    u32  efl;
 
-    efl = safe_cli();
-
     list_for_each_entry(page, &pool->page_list, page_list) {
         if (dma < page->dma)
             continue;
-        if (dma < (page->dma + pool->allocation))
-            goto done;
+        if ((dma - page->dma) < pool->allocation)
+            return page;
     }
-    page = NULL;
- done:
-    safe_sti(efl);
-
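
One subtle fix above: pool_find_page() now tests '(dma - page->dma) < pool->allocation' instead of 'dma < (page->dma + pool->allocation)'. Since the 'dma < page->dma' case was already skipped by the continue, the subtraction cannot wrap, whereas the old addition overflows when a page sits within 'allocation' bytes of the top of the DMA address space and then wrongly rejects in-range addresses. A small self-contained demonstration (assuming 32-bit dma_addr_t for the sake of the example):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t dma_addr_t;    /* assumption: 32-bit DMA addresses */

    int main(void)
    {
        dma_addr_t base = 0xFFFFF000u;   /* page near the top of memory */
        dma_addr_t dma  = base + 64;     /* block inside that page      */
        uint32_t allocation = 0x8000;

        /* Old check: base + allocation wraps to 0x7000, missing the hit. */
        printf("old: %d\n", dma < (dma_addr_t)(base + allocation));  /* 0 */

        /* New check: safe because 'dma < base' was filtered out earlier. */
        printf("new: %d\n", (dma - base) < allocation);              /* 1 */
        return 0;
    }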
@@ -294,26 +340,74 @@
 {
     struct dma_page *page;
     unsigned long flags;
     unsigned int offset;
 
-    u32 efl;
-
+    spin_lock_irqsave(&pool->lock, flags);
     page = pool_find_page(pool, dma);
     if (!page) {
-        printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n",
-               vaddr, (unsigned long)dma);
+        spin_unlock_irqrestore(&pool->lock, flags);
+        printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
+               pool->name, vaddr, (unsigned long)dma);
         return;
     }
 
     offset = vaddr - page->vaddr;
-
-    efl = safe_cli();
-    {
-        page->in_use--;
-        *(int *)vaddr = page->offset;
-        page->offset = offset;
+#ifdef DMAPOOL_DEBUG
+    if ((dma - page->dma) != offset) {
+        spin_unlock_irqrestore(&pool->lock, flags);
+        if (pool->dev)
+            dev_err(pool->dev,
+                "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+                pool->name, vaddr, (unsigned long long)dma);
+        else
+            printk(KERN_ERR
+                   "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+                   pool->name, vaddr, (unsigned long long)dma);
+        return;
+    }
+    {
+        unsigned int chain = page->offset;
+        while (chain < pool->allocation) {
+            if (chain != offset) {
+                chain = *(int *)(page->vaddr + chain);
+                continue;
+            }
+            spin_unlock_irqrestore(&pool->lock, flags);
+            if (pool->dev)
+                dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
+                    "already free\n", pool->name,
+                    (unsigned long long)dma);
+            else
+                printk(KERN_ERR "dma_pool_free %s, dma %Lx "
+                    "already free\n", pool->name,
+                    (unsigned long long)dma);
+            return;
+        }
+    }
+    memset(vaddr, POOL_POISON_FREED, pool->size);
+#endif
+
+    page->in_use--;
+    *(int *)vaddr = page->offset;
+    page->offset = offset;
     /*
      * Resist a temptation to do
      *    if (!is_page_busy(page)) pool_free_page(pool, page);
      * Better have a few empty pages hang around.
      */
+    spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL(dma_pool_free);
+
+/*
+ * Managed DMA pool
+ */
+static void dmam_pool_release(struct device *dev, void *res)
+{
+    struct dma_pool *pool = *(struct dma_pool **)res;
+
+    dma_pool_destroy(pool);
+}
+
+static int dmam_pool_match(struct device *dev, void *res, void *match_data)
+{
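
The diff is cut off in the middle of dmam_pool_match(), but the intent of the new "Managed DMA pool" block is already visible: tie a pool's lifetime to its device through devres, so that dmam_pool_release() destroys the pool automatically when the device's resources are torn down. For context, a sketch of how a managed constructor is typically built on top of these callbacks in the upstream pattern (not verbatim from this revision):

    /* Sketch of the upstream-style managed constructor: allocate a devres
     * slot holding the pool pointer, so dmam_pool_release() above runs at
     * device teardown. Not taken verbatim from this revision. */
    struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                                      size_t size, size_t align, size_t allocation)
    {
        struct dma_pool **ptr, *pool;

        ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
            return NULL;

        pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
        if (pool)
            devres_add(dev, ptr);    /* released via dmam_pool_release() */
        else
            devres_free(ptr);        /* creation failed: drop the slot   */

        return pool;
    }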