Subversion Repositories Kolibri OS

Rev

Rev 6293 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6293 Rev 6936
1
/*
1
/*
2
 * Copyright (C) 2007 Jens Axboe 
2
 * Copyright (C) 2007 Jens Axboe 
3
 *
3
 *
4
 * Scatterlist handling helpers.
4
 * Scatterlist handling helpers.
5
 *
5
 *
6
 * This source code is licensed under the GNU General Public License,
6
 * This source code is licensed under the GNU General Public License,
7
 * Version 2. See the file COPYING for more details.
7
 * Version 2. See the file COPYING for more details.
8
 */
8
 */
9
#include 
9
#include 
10
#include 
10
#include 
11
#include 
11
#include 
12
 
12
 
13
/**
13
/**
14
 * sg_next - return the next scatterlist entry in a list
14
 * sg_next - return the next scatterlist entry in a list
15
 * @sg:		The current sg entry
15
 * @sg:		The current sg entry
16
 *
16
 *
17
 * Description:
17
 * Description:
18
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
18
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
19
 *   of a chained scatterlist, it could jump to the start of a new
19
 *   of a chained scatterlist, it could jump to the start of a new
20
 *   scatterlist array.
20
 *   scatterlist array.
21
 *
21
 *
22
 **/
22
 **/
23
struct scatterlist *sg_next(struct scatterlist *sg)
23
struct scatterlist *sg_next(struct scatterlist *sg)
24
{
24
{
25
#ifdef CONFIG_DEBUG_SG
25
#ifdef CONFIG_DEBUG_SG
26
	BUG_ON(sg->sg_magic != SG_MAGIC);
26
	BUG_ON(sg->sg_magic != SG_MAGIC);
27
#endif
27
#endif
28
	if (sg_is_last(sg))
28
	if (sg_is_last(sg))
29
		return NULL;
29
		return NULL;
30
 
30
 
31
	sg++;
31
	sg++;
32
	if (unlikely(sg_is_chain(sg)))
32
	if (unlikely(sg_is_chain(sg)))
33
		sg = sg_chain_ptr(sg);
33
		sg = sg_chain_ptr(sg);
34
 
34
 
35
	return sg;
35
	return sg;
36
}
36
}
37
EXPORT_SYMBOL(sg_next);
37
EXPORT_SYMBOL(sg_next);
38
 
38
 
39
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Counts every entry reachable from @sg, following chain links via
 *   sg_next(), until the end marker terminates the walk.
 **/
int sg_nents(struct scatterlist *sg)
{
	int count = 0;

	while (sg) {
		count++;
		sg = sg_next(sg);
	}

	return count;
}
EXPORT_SYMBOL(sg_nents);
56
 
56
 
57
/**
57
/**
58
 * sg_nents_for_len - return total count of entries in scatterlist
58
 * sg_nents_for_len - return total count of entries in scatterlist
59
 *                    needed to satisfy the supplied length
59
 *                    needed to satisfy the supplied length
60
 * @sg:		The scatterlist
60
 * @sg:		The scatterlist
61
 * @len:	The total required length
61
 * @len:	The total required length
62
 *
62
 *
63
 * Description:
63
 * Description:
64
 * Determines the number of entries in sg that are required to meet
64
 * Determines the number of entries in sg that are required to meet
65
 * the supplied length, taking into acount chaining as well
65
 * the supplied length, taking into acount chaining as well
66
 *
66
 *
67
 * Returns:
67
 * Returns:
68
 *   the number of sg entries needed, negative error on failure
68
 *   the number of sg entries needed, negative error on failure
69
 *
69
 *
70
 **/
70
 **/
71
int sg_nents_for_len(struct scatterlist *sg, u64 len)
71
int sg_nents_for_len(struct scatterlist *sg, u64 len)
72
{
72
{
73
	int nents;
73
	int nents;
74
	u64 total;
74
	u64 total;
75
 
75
 
76
	if (!len)
76
	if (!len)
77
		return 0;
77
		return 0;
78
 
78
 
79
	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
79
	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
80
		nents++;
80
		nents++;
81
		total += sg->length;
81
		total += sg->length;
82
		if (total >= len)
82
		if (total >= len)
83
			return nents;
83
			return nents;
84
	}
84
	}
85
 
85
 
86
	return -EINVAL;
86
	return -EINVAL;
87
}
87
}
88
EXPORT_SYMBOL(sg_nents_for_len);
88
EXPORT_SYMBOL(sg_nents_for_len);
89
 
89
 
90
/**
90
/**
91
 * sg_last - return the last scatterlist entry in a list
91
 * sg_last - return the last scatterlist entry in a list
92
 * @sgl:	First entry in the scatterlist
92
 * @sgl:	First entry in the scatterlist
93
 * @nents:	Number of entries in the scatterlist
93
 * @nents:	Number of entries in the scatterlist
94
 *
94
 *
95
 * Description:
95
 * Description:
96
 *   Should only be used casually, it (currently) scans the entire list
96
 *   Should only be used casually, it (currently) scans the entire list
97
 *   to get the last entry.
97
 *   to get the last entry.
98
 *
98
 *
99
 *   Note that the @sgl@ pointer passed in need not be the first one,
99
 *   Note that the @sgl@ pointer passed in need not be the first one,
100
 *   the important bit is that @nents@ denotes the number of entries that
100
 *   the important bit is that @nents@ denotes the number of entries that
101
 *   exist from @sgl@.
101
 *   exist from @sgl@.
102
 *
102
 *
103
 **/
103
 **/
104
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
104
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
105
{
105
{
106
	struct scatterlist *sg, *ret = NULL;
106
	struct scatterlist *sg, *ret = NULL;
107
	unsigned int i;
107
	unsigned int i;
108
 
108
 
109
	for_each_sg(sgl, sg, nents, i)
109
	for_each_sg(sgl, sg, nents, i)
110
		ret = sg;
110
		ret = sg;
111
 
111
 
112
#ifdef CONFIG_DEBUG_SG
112
#ifdef CONFIG_DEBUG_SG
113
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
113
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
114
	BUG_ON(!sg_is_last(ret));
114
	BUG_ON(!sg_is_last(ret));
115
#endif
115
#endif
116
	return ret;
116
	return ret;
117
}
117
}
118
EXPORT_SYMBOL(sg_last);
118
EXPORT_SYMBOL(sg_last);
119
 
119
 
120
/**
120
/**
121
 * sg_init_table - Initialize SG table
121
 * sg_init_table - Initialize SG table
122
 * @sgl:	   The SG table
122
 * @sgl:	   The SG table
123
 * @nents:	   Number of entries in table
123
 * @nents:	   Number of entries in table
124
 *
124
 *
125
 * Notes:
125
 * Notes:
126
 *   If this is part of a chained sg table, sg_mark_end() should be
126
 *   If this is part of a chained sg table, sg_mark_end() should be
127
 *   used only on the last table part.
127
 *   used only on the last table part.
128
 *
128
 *
129
 **/
129
 **/
130
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
130
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
131
{
131
{
132
	memset(sgl, 0, sizeof(*sgl) * nents);
132
	memset(sgl, 0, sizeof(*sgl) * nents);
133
#ifdef CONFIG_DEBUG_SG
133
#ifdef CONFIG_DEBUG_SG
134
	{
134
	{
135
		unsigned int i;
135
		unsigned int i;
136
		for (i = 0; i < nents; i++)
136
		for (i = 0; i < nents; i++)
137
			sgl[i].sg_magic = SG_MAGIC;
137
			sgl[i].sg_magic = SG_MAGIC;
138
	}
138
	}
139
#endif
139
#endif
140
	sg_mark_end(&sgl[nents - 1]);
140
	sg_mark_end(&sgl[nents - 1]);
141
}
141
}
142
EXPORT_SYMBOL(sg_init_table);
142
EXPORT_SYMBOL(sg_init_table);
143
 
143
 
144
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
/*
 * NOTE(review): sg_init_one() is disabled in this port (presumably
 * because sg_set_buf()/virt_to_page() are unavailable here — confirm),
 * yet the EXPORT_SYMBOL() below is still emitted. This only builds if
 * EXPORT_SYMBOL is a no-op macro in this environment — verify; otherwise
 * it references an undefined symbol.
 */
//void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
//{
//   sg_init_table(sg, 1);
//   sg_set_buf(sg, buf, buflen);
//}
EXPORT_SYMBOL(sg_init_one);
157
 
157
 
158
/*
158
/*
159
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
159
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
160
 * helpers.
160
 * helpers.
161
 */
161
 */
162
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
162
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
163
{
163
{
164
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
164
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
165
}
165
}
166
 
166
 
167
/* Default free helper paired with sg_kmalloc(); @nents is unused. */
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}
171
 
171
 
172
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	/* Nothing allocated (or already freed) — nothing to do. */
	if (unlikely(!table->sgl))
		return;

	/* Walk the chunk chain, consuming orig_nents as entries are freed. */
	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			/* Grab the next chunk before freeing this one. */
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			/* First chunk was caller-preallocated; skip once. */
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
224
 
224
 
225
/**
225
/**
226
 * sg_free_table - Free a previously allocated sg table
226
 * sg_free_table - Free a previously allocated sg table
227
 * @table:	The mapped sg table header
227
 * @table:	The mapped sg table header
228
 *
228
 *
229
 **/
229
 **/
230
void sg_free_table(struct sg_table *table)
230
void sg_free_table(struct sg_table *table)
231
{
231
{
232
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
232
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
233
}
233
}
234
EXPORT_SYMBOL(sg_free_table);
234
EXPORT_SYMBOL(sg_free_table);
235
 
235
 
236
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: caller-preallocated first scatterlist chunk (or NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	/* Without arch chaining support, a single chunk must suffice. */
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	/* Allocate chunk by chunk; each full chunk sacrifices its last
	 * slot as the chain pointer to the next chunk. */
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			/* Consume the preallocated chunk exactly once. */
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

 			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
327
 
327
 
328
/**
328
/**
329
 * sg_alloc_table - Allocate and initialize an sg table
329
 * sg_alloc_table - Allocate and initialize an sg table
330
 * @table:	The sg table header to use
330
 * @table:	The sg table header to use
331
 * @nents:	Number of entries in sg list
331
 * @nents:	Number of entries in sg list
332
 * @gfp_mask:	GFP allocation mask
332
 * @gfp_mask:	GFP allocation mask
333
 *
333
 *
334
 *  Description:
334
 *  Description:
335
 *    Allocate and initialize an sg table. If @nents@ is larger than
335
 *    Allocate and initialize an sg table. If @nents@ is larger than
336
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
336
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
337
 *
337
 *
338
 **/
338
 **/
339
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
339
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
340
{
340
{
341
	int ret;
341
	int ret;
342
 
342
 
343
	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
343
	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
344
			       NULL, gfp_mask, sg_kmalloc);
344
			       NULL, gfp_mask, sg_kmalloc);
345
	if (unlikely(ret))
345
	if (unlikely(ret))
346
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
346
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
347
 
347
 
348
	return ret;
348
	return ret;
349
}
349
}
350
EXPORT_SYMBOL(sg_alloc_table);
350
EXPORT_SYMBOL(sg_alloc_table);
351
 
351
 
352
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:     Offset from start of the first page to the start of a buffer
 * @size:       Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 *    NOTE(review): the chunk-size math assumes @offset < PAGE_SIZE
 *    (it is subtracted from the first chunk only) — confirm with callers.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		/* chunk covers pages [cur_page, j); clamp to remaining size */
		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;	/* only the first chunk carries the offset */
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
415
 
415
 
416
void __sg_page_iter_start(struct sg_page_iter *piter,
416
void __sg_page_iter_start(struct sg_page_iter *piter,
417
			  struct scatterlist *sglist, unsigned int nents,
417
			  struct scatterlist *sglist, unsigned int nents,
418
			  unsigned long pgoffset)
418
			  unsigned long pgoffset)
419
{
419
{
420
	piter->__pg_advance = 0;
420
	piter->__pg_advance = 0;
421
	piter->__nents = nents;
421
	piter->__nents = nents;
422
 
422
 
423
	piter->sg = sglist;
423
	piter->sg = sglist;
424
	piter->sg_pgoffset = pgoffset;
424
	piter->sg_pgoffset = pgoffset;
425
}
425
}
426
EXPORT_SYMBOL(__sg_page_iter_start);
426
EXPORT_SYMBOL(__sg_page_iter_start);
427
 
427
 
428
static int sg_page_count(struct scatterlist *sg)
428
static int sg_page_count(struct scatterlist *sg)
429
{
429
{
430
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
430
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
431
}
431
}
432
 
432
 
433
/*
 * Advance the page iterator by one page, crossing sg-entry boundaries
 * as needed. Returns false when the list is exhausted.
 */
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	/* __pg_advance is 0 on the first call so the initial page is
	 * visited; every later call steps one page forward. */
	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	/* Carry the page offset over into following sg entries. */
	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
451
 
451
 
452
/**
452
/**
453
 * sg_miter_start - start mapping iteration over a sg list
453
 * sg_miter_start - start mapping iteration over a sg list
454
 * @miter: sg mapping iter to be started
454
 * @miter: sg mapping iter to be started
455
 * @sgl: sg list to iterate over
455
 * @sgl: sg list to iterate over
456
 * @nents: number of sg entries
456
 * @nents: number of sg entries
457
 *
457
 *
458
 * Description:
458
 * Description:
459
 *   Starts mapping iterator @miter.
459
 *   Starts mapping iterator @miter.
460
 *
460
 *
461
 * Context:
461
 * Context:
462
 *   Don't care.
462
 *   Don't care.
463
 */
463
 */
464
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
464
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
465
		    unsigned int nents, unsigned int flags)
465
		    unsigned int nents, unsigned int flags)
466
{
466
{
467
	memset(miter, 0, sizeof(struct sg_mapping_iter));
467
	memset(miter, 0, sizeof(struct sg_mapping_iter));
468
 
468
 
469
	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
469
	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
470
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
470
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
471
	miter->__flags = flags;
471
	miter->__flags = flags;
472
}
472
}
473
EXPORT_SYMBOL(sg_miter_start);
473
EXPORT_SYMBOL(sg_miter_start);
474
 
474
 
475
/*
 * Position the miter on the next page that has data, filling in
 * __offset/__remaining for it. Returns false at end of list. No-op
 * (returns true) if the current page still has bytes remaining.
 */
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		/* sg->offset applies only within the entry's first page */
		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		/* clamp to what fits in this single page */
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
496
 
496
 
497
/**
497
/**
498
 * sg_miter_skip - reposition mapping iterator
498
 * sg_miter_skip - reposition mapping iterator
499
 * @miter: sg mapping iter to be skipped
499
 * @miter: sg mapping iter to be skipped
500
 * @offset: number of bytes to plus the current location
500
 * @offset: number of bytes to plus the current location
501
 *
501
 *
502
 * Description:
502
 * Description:
503
 *   Sets the offset of @miter to its current location plus @offset bytes.
503
 *   Sets the offset of @miter to its current location plus @offset bytes.
504
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
504
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
505
 *   stops @miter.
505
 *   stops @miter.
506
 *
506
 *
507
 * Context:
507
 * Context:
508
 *   Don't care if @miter is stopped, or not proceeded yet.
508
 *   Don't care if @miter is stopped, or not proceeded yet.
509
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
509
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
510
 *
510
 *
511
 * Returns:
511
 * Returns:
512
 *   true if @miter contains the valid mapping.  false if end of sg
512
 *   true if @miter contains the valid mapping.  false if end of sg
513
 *   list is reached.
513
 *   list is reached.
514
 */
514
 */
515
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
515
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
516
{
516
{
517
	sg_miter_stop(miter);
517
	sg_miter_stop(miter);
518
 
518
 
519
	while (offset) {
519
	while (offset) {
520
		off_t consumed;
520
		off_t consumed;
521
 
521
 
522
		if (!sg_miter_get_next_page(miter))
522
		if (!sg_miter_get_next_page(miter))
523
			return false;
523
			return false;
524
 
524
 
525
		consumed = min_t(off_t, offset, miter->__remaining);
525
		consumed = min_t(off_t, offset, miter->__remaining);
526
		miter->__offset += consumed;
526
		miter->__offset += consumed;
527
		miter->__remaining -= consumed;
527
		miter->__remaining -= consumed;
528
		offset -= consumed;
528
		offset -= consumed;
529
	}
529
	}
530
 
530
 
531
	return true;
531
	return true;
532
}
532
}
533
EXPORT_SYMBOL(sg_miter_skip);
533
EXPORT_SYMBOL(sg_miter_skip);
534
 
534
 
535
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	/* map the page: atomic kmap keeps preemption disabled until stop */
	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
574
 
574
 
575
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	/* The caller may only shrink consumed, never grow it. */
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		/* Commit the consumed bytes so the next sg_miter_next()
		 * resumes at the right position within the page. */
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if (miter->__flags & SG_MITER_ATOMIC) {
			/* kmap_atomic() mappings are only valid while
			 * preemption stays disabled. */
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
611
 
611
 
612
/**
612
/**
613
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
613
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
614
 * @sgl:		 The SG list
614
 * @sgl:		 The SG list
615
 * @nents:		 Number of SG entries
615
 * @nents:		 Number of SG entries
616
 * @buf:		 Where to copy from
616
 * @buf:		 Where to copy from
617
 * @buflen:		 The number of bytes to copy
617
 * @buflen:		 The number of bytes to copy
618
 * @skip:		 Number of bytes to skip before copying
618
 * @skip:		 Number of bytes to skip before copying
619
 * @to_buffer:		 transfer direction (true == from an sg list to a
619
 * @to_buffer:		 transfer direction (true == from an sg list to a
620
 *			 buffer, false == from a buffer to an sg list
620
 *			 buffer, false == from a buffer to an sg list
621
 *
621
 *
622
 * Returns the number of copied bytes.
622
 * Returns the number of copied bytes.
623
 *
623
 *
624
 **/
624
 **/
625
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
625
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
626
		      size_t buflen, off_t skip, bool to_buffer)
626
		      size_t buflen, off_t skip, bool to_buffer)
627
{
627
{
628
	unsigned int offset = 0;
628
	unsigned int offset = 0;
629
	struct sg_mapping_iter miter;
629
	struct sg_mapping_iter miter;
630
	unsigned long flags;
630
	unsigned long flags;
631
	unsigned int sg_flags = SG_MITER_ATOMIC;
631
	unsigned int sg_flags = SG_MITER_ATOMIC;
632
 
632
 
633
	if (to_buffer)
633
	if (to_buffer)
634
		sg_flags |= SG_MITER_FROM_SG;
634
		sg_flags |= SG_MITER_FROM_SG;
635
	else
635
	else
636
		sg_flags |= SG_MITER_TO_SG;
636
		sg_flags |= SG_MITER_TO_SG;
637
 
637
 
638
	sg_miter_start(&miter, sgl, nents, sg_flags);
638
	sg_miter_start(&miter, sgl, nents, sg_flags);
639
 
639
 
640
	if (!sg_miter_skip(&miter, skip))
640
	if (!sg_miter_skip(&miter, skip))
641
		return false;
641
		return false;
642
 
642
 
643
	local_irq_save(flags);
643
	local_irq_save(flags);
644
 
644
 
645
	while (sg_miter_next(&miter) && offset < buflen) {
645
	while (sg_miter_next(&miter) && offset < buflen) {
646
		unsigned int len;
646
		unsigned int len;
647
 
647
 
648
		len = min(miter.length, buflen - offset);
648
		len = min(miter.length, buflen - offset);
649
 
649
 
650
		if (to_buffer)
650
		if (to_buffer)
651
			memcpy(buf + offset, miter.addr, len);
651
			memcpy(buf + offset, miter.addr, len);
652
		else
652
		else
653
			memcpy(miter.addr, buf + offset, len);
653
			memcpy(miter.addr, buf + offset, len);
654
 
654
 
655
		offset += len;
655
		offset += len;
656
	}
656
	}
657
 
657
 
658
	sg_miter_stop(&miter);
658
	sg_miter_stop(&miter);
659
 
659
 
660
	local_irq_restore(flags);
660
	local_irq_restore(flags);
661
	return offset;
661
	return offset;
662
}
662
}
663
EXPORT_SYMBOL(sg_copy_buffer);
663
EXPORT_SYMBOL(sg_copy_buffer);
664
 
664
 
665
/**
665
/**
666
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
666
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
667
 * @sgl:		 The SG list
667
 * @sgl:		 The SG list
668
 * @nents:		 Number of SG entries
668
 * @nents:		 Number of SG entries
669
 * @buf:		 Where to copy from
669
 * @buf:		 Where to copy from
670
 * @buflen:		 The number of bytes to copy
670
 * @buflen:		 The number of bytes to copy
671
 *
671
 *
672
 * Returns the number of copied bytes.
672
 * Returns the number of copied bytes.
673
 *
673
 *
674
 **/
674
 **/
675
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
675
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
676
			   const void *buf, size_t buflen)
676
			   const void *buf, size_t buflen)
677
{
677
{
678
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
678
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
679
}
679
}
680
EXPORT_SYMBOL(sg_copy_from_buffer);
680
EXPORT_SYMBOL(sg_copy_from_buffer);
681
 
681
 
682
/**
682
/**
683
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
683
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
684
 * @sgl:		 The SG list
684
 * @sgl:		 The SG list
685
 * @nents:		 Number of SG entries
685
 * @nents:		 Number of SG entries
686
 * @buf:		 Where to copy to
686
 * @buf:		 Where to copy to
687
 * @buflen:		 The number of bytes to copy
687
 * @buflen:		 The number of bytes to copy
688
 *
688
 *
689
 * Returns the number of copied bytes.
689
 * Returns the number of copied bytes.
690
 *
690
 *
691
 **/
691
 **/
692
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
692
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
693
			 void *buf, size_t buflen)
693
			 void *buf, size_t buflen)
694
{
694
{
695
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
695
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
696
}
696
}
697
EXPORT_SYMBOL(sg_copy_to_buffer);
697
EXPORT_SYMBOL(sg_copy_to_buffer);
698
 
698
 
699
/**
699
/**
700
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
700
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
701
 * @sgl:		 The SG list
701
 * @sgl:		 The SG list
702
 * @nents:		 Number of SG entries
702
 * @nents:		 Number of SG entries
703
 * @buf:		 Where to copy from
703
 * @buf:		 Where to copy from
704
 * @buflen:		 The number of bytes to copy
704
 * @buflen:		 The number of bytes to copy
705
 * @skip:		 Number of bytes to skip before copying
705
 * @skip:		 Number of bytes to skip before copying
706
 *
706
 *
707
 * Returns the number of copied bytes.
707
 * Returns the number of copied bytes.
708
 *
708
 *
709
 **/
709
 **/
710
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
710
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
711
			    const void *buf, size_t buflen, off_t skip)
711
			    const void *buf, size_t buflen, off_t skip)
712
{
712
{
713
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
713
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
714
}
714
}
715
EXPORT_SYMBOL(sg_pcopy_from_buffer);
715
EXPORT_SYMBOL(sg_pcopy_from_buffer);
716
 
716
 
717
/**
717
/**
718
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
718
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
719
 * @sgl:		 The SG list
719
 * @sgl:		 The SG list
720
 * @nents:		 Number of SG entries
720
 * @nents:		 Number of SG entries
721
 * @buf:		 Where to copy to
721
 * @buf:		 Where to copy to
722
 * @buflen:		 The number of bytes to copy
722
 * @buflen:		 The number of bytes to copy
723
 * @skip:		 Number of bytes to skip before copying
723
 * @skip:		 Number of bytes to skip before copying
724
 *
724
 *
725
 * Returns the number of copied bytes.
725
 * Returns the number of copied bytes.
726
 *
726
 *
727
 **/
727
 **/
728
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
728
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
729
			  void *buf, size_t buflen, off_t skip)
729
			  void *buf, size_t buflen, off_t skip)
730
{
730
{
731
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
731
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
732
}
732
}
733
EXPORT_SYMBOL(sg_pcopy_to_buffer);
733
EXPORT_SYMBOL(sg_pcopy_to_buffer);