/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the number of entries in sg, taking into account
 * chaining as well
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
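
/*
 * Usage sketch (not part of the original file; the function name is
 * hypothetical): walks a possibly chained list with sg_next() and checks
 * the count against sg_nents(). Only the helpers defined above are assumed.
 */
#if 0
static void sg_nents_demo(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int manual = 0;

	/* sg_next() transparently follows chain entries and returns
	 * NULL after the entry marked with sg_mark_end(). */
	for (sg = sgl; sg; sg = sg_next(sg))
		manual++;

	BUG_ON(manual != sg_nents(sgl));
}
#endif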

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in sg that are required to meet
 * the supplied length, taking into account chaining as well
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
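
/*
 * Usage sketch (illustrative; function name hypothetical): ask how many
 * entries are needed to cover the first 8 KiB of a list, e.g. before
 * mapping only that prefix for DMA. Error handling mirrors the -EINVAL
 * return documented above.
 */
#if 0
static int map_first_8k(struct scatterlist *sgl)
{
	int nents = sg_nents_for_len(sgl, 8192);

	if (nents < 0)
		return nents;	/* list is shorter than 8 KiB */

	/* ...hand the first 'nents' entries to the DMA layer... */
	return 0;
}
#endif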

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
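
/*
 * Usage sketch (illustrative; function name hypothetical): initialize a
 * small on-stack table and point its entries at pages. sg_set_page() is
 * the standard helper for this; the pages are assumed to come from
 * elsewhere.
 */
#if 0
static void build_two_entry_list(struct page *a, struct page *b,
				 struct scatterlist sg[2])
{
	sg_init_table(sg, 2);		/* zeroes entries, marks sg[1] last */
	sg_set_page(&sg[0], a, PAGE_SIZE, 0);
	sg_set_page(&sg[1], b, PAGE_SIZE, 0);
}
#endif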

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
//void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
//{
//	sg_init_table(sg, 1);
//	sg_set_buf(sg, buf, buflen);
//}
//EXPORT_SYMBOL(sg_init_one);

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL chunk if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
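
/*
 * Usage sketch (illustrative; function name hypothetical): the usual
 * allocate/use/free pairing. sg_alloc_table() already cleans up after
 * itself on failure, so the caller only frees on the success path.
 */
#if 0
static int table_demo(unsigned int nents)
{
	struct sg_table st;
	int ret;

	ret = sg_alloc_table(&st, nents, GFP_KERNEL);
	if (ret)
		return ret;	/* nothing to free; alloc cleaned up */

	/* ...fill st.sgl with sg_set_page() and hand it to a driver... */

	sg_free_table(&st);
	return 0;
}
#endif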

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
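
/*
 * Usage sketch (illustrative; function name hypothetical): the page
 * iterator above is normally driven through the for_each_sg_page()
 * helper from <linux/scatterlist.h>, which visits each page of each
 * entry regardless of entry sizes.
 */
#if 0
static void touch_every_page(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* ...per-page work goes here... */
	}
}
#endif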

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
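
/*
 * Usage sketch (illustrative; function name hypothetical): the canonical
 * start/next/stop pattern. This zeroes every byte covered by the list;
 * SG_MITER_TO_SG declares that the sg list is the destination.
 */
#if 0
static void zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);	/* releases the last kmap */
}
#endif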

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to or from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
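
/*
 * Usage sketch (illustrative; function name hypothetical): round-trip a
 * small buffer through an sg list and back, checking the byte counts the
 * copy helpers return. The list must cover at least 64 bytes.
 */
#if 0
static int copy_roundtrip(struct scatterlist *sgl, unsigned int nents)
{
	char out[64], in[64] = "scatterlist copy test";

	if (sg_copy_from_buffer(sgl, nents, in, sizeof(in)) != sizeof(in))
		return -EIO;	/* list shorter than the buffer */
	if (sg_copy_to_buffer(sgl, nents, out, sizeof(out)) != sizeof(out))
		return -EIO;

	return memcmp(in, out, sizeof(in)) ? -EIO : 0;
}
#endif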

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);