/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
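
/*
 * Illustrative sketch (not part of the original file): walking a possibly
 * chained scatterlist entry by entry with sg_next(); the loop ends when
 * sg_next() returns NULL.
 */
static inline unsigned int example_total_length(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}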

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the total number of entries in @sg, taking chaining into
 * account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in @sg that are required to meet
 * the supplied length, taking chaining into account as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
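
/*
 * Illustrative sketch (not part of the original file): using
 * sg_nents_for_len() as a bounds check before mapping a transfer. A
 * negative return means the list is shorter than `len` bytes.
 */
static inline bool example_list_covers(struct scatterlist *sgl, u64 len)
{
	return sg_nents_for_len(sgl, len) >= 0;
}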

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 * Note: the implementation is commented out in this port, so the export
 * below is disabled along with it.
 **/
//void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
//{
//   sg_init_table(sg, 1);
//   sg_set_buf(sg, buf, buflen);
//}
//EXPORT_SYMBOL(sg_init_one);
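
/*
 * Illustrative sketch (not part of the original file): with sg_init_one()
 * disabled, a single-entry list can be set up by hand with sg_init_table()
 * plus sg_set_page(). Assumes the caller already has the struct page
 * backing the buffer.
 */
static inline void example_init_single(struct scatterlist *sg,
				       struct page *page,
				       unsigned int len, unsigned int offset)
{
	sg_init_table(sg, 1);		/* zero the entry and mark it last */
	sg_set_page(sg, page, len, offset);
}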

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: an optional preallocated first scatterlist chunk, or NULL
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
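
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * sg_alloc_table() and sg_free_table(). The `pages` array and its count
 * are assumptions standing in for buffers the caller already owns.
 */
static inline int example_build_table(struct sg_table *table,
				      struct page **pages, unsigned int n,
				      gfp_t gfp_mask)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(table, n, gfp_mask);
	if (ret)
		return ret;

	for_each_sg(table->sgl, sg, table->orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table->sgl to the consumer, then release it ... */
	sg_free_table(table);
	return 0;
}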

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:     Offset from start of the first page to the start of a buffer
 * @size:       Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
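
/*
 * Illustrative sketch (not part of the original file): building a table
 * from a pinned page array. The buffer starts `offset` bytes into
 * pages[0] and is `size` bytes long; physically contiguous pages collapse
 * into a single entry.
 */
static inline int example_table_from_pages(struct sg_table *sgt,
					   struct page **pages,
					   unsigned int n_pages,
					   unsigned long offset,
					   unsigned long size,
					   gfp_t gfp_mask)
{
	int ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
					    offset, size, gfp_mask);
	if (ret)
		return ret;

	/* ... use sgt->sgl here ... */
	sg_free_table(sgt);
	return 0;
}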

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
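
/*
 * Illustrative sketch (not part of the original file): counting the pages
 * a scatterlist spans with the page iterator above. sg_page_iter_page()
 * from <linux/scatterlist.h> could be used inside the loop to reach each
 * struct page, as sg_miter_next() does below.
 */
static inline unsigned int example_count_pages(struct scatterlist *sgl,
					       unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	__sg_page_iter_start(&piter, sgl, nents, 0);
	while (__sg_page_iter_next(&piter))
		n++;

	return n;
}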

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* iteration flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been advanced by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
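
/*
 * Illustrative sketch (not part of the original file): the canonical
 * start/next/stop mapping loop, here zeroing every byte the list
 * describes. SG_MITER_TO_SG marks the list as the destination of the
 * writes.
 */
static inline void example_zero_sg(struct scatterlist *sgl,
				   unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);
}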

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
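
/*
 * Illustrative sketch (not part of the original file): round-tripping data
 * through a scatterlist with the two wrappers above. The `to_buffer` flag
 * of sg_copy_buffer() selects the direction: the *_from_* wrapper writes
 * into the list, the *_to_* wrapper reads back out of it.
 */
static inline bool example_roundtrip(struct scatterlist *sgl,
				     unsigned int nents,
				     const void *src, void *dst, size_t len)
{
	size_t in  = sg_copy_from_buffer(sgl, nents, src, len);
	size_t out = sg_copy_to_buffer(sgl, nents, dst, len);

	return in == len && out == len;
}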

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);