Subversion Repositories Kolibri OS

Rev

Rev 4568 | Rev 5270 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H

/*
 * NOTE(review): the include targets below were stripped by the web
 * extraction ("#include " with no path). They are reconstructed from the
 * mainline kernel header this file is ported from — verify against the
 * actual repository revision.
 */
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>

#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>
11
 
12
/*
 * sg_table - a (possibly chained) scatterlist plus bookkeeping.
 * nents may be smaller than orig_nents after DMA mapping coalesces entries.
 */
struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};
17
 
18
/*
 * Notes on SG table design.
 *
 * Architectures must provide an unsigned long page_link field in the
 * scatterlist struct. We use that to place the page pointer AND encode
 * information about the sg table as well. The two lower bits are reserved
 * for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 *
 */
34
 
35
/* Poison value stored in sg->sg_magic when CONFIG_DEBUG_SG is enabled. */
#define SG_MAGIC	0x87654321

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define sg_is_chain(sg)		((sg)->page_link & 0x01)
#define sg_is_last(sg)		((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~0x03))
46
 
47
/**
48
 * sg_assign_page - Assign a given page to an SG entry
49
 * @sg:		    SG entry
50
 * @page:	    The page
51
 *
52
 * Description:
53
 *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
54
 *   variant.
55
 *
56
 **/
57
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
58
{
59
	unsigned long page_link = sg->page_link & 0x3;
60
 
61
	/*
62
	 * In order for the low bit stealing approach to work, pages
63
	 * must be aligned at a 32-bit boundary as a minimum.
64
	 */
65
	BUG_ON((unsigned long) page & 0x03);
66
#ifdef CONFIG_DEBUG_SG
67
	BUG_ON(sg->sg_magic != SG_MAGIC);
68
	BUG_ON(sg_is_chain(sg));
69
#endif
70
	sg->page_link = page_link | (unsigned long) page;
71
}
72
 
73
/**
74
 * sg_set_page - Set sg entry to point at given page
75
 * @sg:		 SG entry
76
 * @page:	 The page
77
 * @len:	 Length of data
78
 * @offset:	 Offset into page
79
 *
80
 * Description:
81
 *   Use this function to set an sg entry pointing at a page, never assign
82
 *   the page directly. We encode sg table information in the lower bits
83
 *   of the page pointer. See sg_page() for looking up the page belonging
84
 *   to an sg entry.
85
 *
86
 **/
87
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
88
			       unsigned int len, unsigned int offset)
89
{
90
	sg_assign_page(sg, page);
91
	sg->offset = offset;
92
	sg->length = len;
93
}
94
 
95
static inline struct page *sg_page(struct scatterlist *sg)
96
{
97
#ifdef CONFIG_DEBUG_SG
98
	BUG_ON(sg->sg_magic != SG_MAGIC);
99
	BUG_ON(sg_is_chain(sg));
100
#endif
101
	return (struct page *)((sg)->page_link & ~0x3);
102
}
103
 
104
/*
 * Loop over each sg element, following the pointer to a new list if necessary
 */
#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
109
 
110
/**
111
 * sg_chain - Chain two sglists together
112
 * @prv:	First scatterlist
113
 * @prv_nents:	Number of entries in prv
114
 * @sgl:	Second scatterlist
115
 *
116
 * Description:
117
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
118
 *
119
 **/
120
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
121
			    struct scatterlist *sgl)
122
{
123
#ifndef ARCH_HAS_SG_CHAIN
124
	BUG();
125
#endif
126
 
127
	/*
128
	 * offset and length are unused for chain entry.  Clear them.
129
	 */
130
	prv[prv_nents - 1].offset = 0;
131
	prv[prv_nents - 1].length = 0;
132
 
133
	/*
134
	 * Set lowest bit to indicate a link pointer, and make sure to clear
135
	 * the termination bit if it happens to be set.
136
	 */
137
	prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
138
}
139
 
140
/**
141
 * sg_mark_end - Mark the end of the scatterlist
142
 * @sg:		 SG entryScatterlist
143
 *
144
 * Description:
145
 *   Marks the passed in sg entry as the termination point for the sg
146
 *   table. A call to sg_next() on this entry will return NULL.
147
 *
148
 **/
149
static inline void sg_mark_end(struct scatterlist *sg)
150
{
151
#ifdef CONFIG_DEBUG_SG
152
	BUG_ON(sg->sg_magic != SG_MAGIC);
153
#endif
154
	/*
155
	 * Set termination bit, clear potential chain bit
156
	 */
157
	sg->page_link |= 0x02;
158
	sg->page_link &= ~0x01;
159
}
160
 
161
/**
3747 Serge 162
 * sg_unmark_end - Undo setting the end of the scatterlist
163
 * @sg:		 SG entryScatterlist
164
 *
165
 * Description:
166
 *   Removes the termination marker from the given entry of the scatterlist.
167
 *
168
 **/
169
static inline void sg_unmark_end(struct scatterlist *sg)
170
{
171
#ifdef CONFIG_DEBUG_SG
172
	BUG_ON(sg->sg_magic != SG_MAGIC);
173
#endif
174
	sg->page_link &= ~0x02;
175
}
176
 
177
/**
3243 Serge 178
 * sg_phys - Return physical address of an sg entry
179
 * @sg:	     SG entry
180
 *
181
 * Description:
182
 *   This calls page_to_phys() on the page in this sg entry, and adds the
183
 *   sg offset. The caller must know that it is legal to call page_to_phys()
184
 *   on the sg page.
185
 *
186
 **/
187
static inline dma_addr_t sg_phys(struct scatterlist *sg)
188
{
189
	return page_to_phys(sg_page(sg)) + sg->offset;
190
}
191
 
192
/**
 * sg_virt - Return virtual address of an sg entry
 * @sg:      SG entry
 *
 * Description:
 *   This calls page_address() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that the sg page has a valid virtual
 *   mapping.
 *
 **/
/* Disabled in this port: page_address() is not available here. */
//static inline void *sg_virt(struct scatterlist *sg)
//{
//   return page_address(sg_page(sg)) + sg->offset;
//}
206
 
207
int sg_nents(struct scatterlist *sg);
208
struct scatterlist *sg_next(struct scatterlist *);
209
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
210
void sg_init_table(struct scatterlist *, unsigned int);
211
void sg_init_one(struct scatterlist *, const void *, unsigned int);
212
 
213
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
214
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
215
 
5056 serge 216
void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
3243 Serge 217
void sg_free_table(struct sg_table *);
5056 serge 218
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
219
		     struct scatterlist *, gfp_t, sg_alloc_fn *);
3243 Serge 220
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
221
int sg_alloc_table_from_pages(struct sg_table *sgt,
222
	struct page **pages, unsigned int n_pages,
223
	unsigned long offset, unsigned long size,
224
	gfp_t gfp_mask);
225
 
226
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
227
			   void *buf, size_t buflen);
228
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
229
			 void *buf, size_t buflen);
230
 
5056 serge 231
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
232
			    void *buf, size_t buflen, off_t skip);
233
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
234
			  void *buf, size_t buflen, off_t skip);
235
 
3243 Serge 236
/*
237
 * Maximum number of entries that will be allocated in one piece, if
238
 * a list larger than this is required then chaining will be utilized.
239
 */
3297 Serge 240
#define SG_MAX_SINGLE_ALLOC     (4*PAGE_SIZE / sizeof(struct scatterlist))
3243 Serge 241
 
3747 Serge 242
/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page.  On each successful iteration,
 * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
 * to get the current page and its dma address. @piter->sg will point to the
 * sg holding this page and @piter->sg_pgoffset to the page's page offset
 * within the sg. The iteration will stop either when a maximum number of sg
 * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* sg holding the page */
	unsigned int		sg_pgoffset;	/* page offset within the sg */

	/* these are internal states, keep away */
	unsigned int		__nents;	/* remaining sg entries */
	int			__pg_advance;	/* nr pages to advance at the
						 * next step */
};

bool __sg_page_iter_next(struct sg_page_iter *piter);
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset);
266
/**
267
 * sg_page_iter_page - get the current page held by the page iterator
268
 * @piter:	page iterator holding the page
269
 */
270
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
271
{
272
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
273
}
274
 
275
/**
276
 * sg_page_iter_dma_address - get the dma address of the current page held by
277
 * the page iterator.
278
 * @piter:	page iterator holding the page
279
 */
280
static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
281
{
282
	return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
283
}
284
 
285
/**
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:	sglist to iterate over
 * @piter:	page iterator to hold current page, sg, sg_pgoffset
 * @nents:	maximum number of sg entries to iterate over
 * @pgoffset:	starting page offset
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)		   \
	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
	     __sg_page_iter_next(piter);)
295
 
3243 Serge 296
/*
 * Mapping sg iterator
 *
 * Iterates over sg entries mapping page-by-page.  On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr.  As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration.  It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */
311
 
312
/* Flags for sg_miter_start(). */
#define SG_MITER_ATOMIC		(1 << 0)	 /* use kmap_atomic */
#define SG_MITER_TO_SG		(1 << 1)	/* flush back to phys on unmap */
#define SG_MITER_FROM_SG	(1 << 2)	/* nop */
315
 
316
struct sg_mapping_iter {
317
	/* the following three fields can be accessed directly */
318
	struct page		*page;		/* currently mapped page */
319
	void			*addr;		/* pointer to the mapped area */
320
	size_t			length;		/* length of the mapped area */
321
	size_t			consumed;	/* number of consumed bytes */
3747 Serge 322
	struct sg_page_iter	piter;		/* page iterator */
3243 Serge 323
 
324
	/* these are internal states, keep away */
3747 Serge 325
	unsigned int		__offset;	/* offset within page */
326
	unsigned int		__remaining;	/* remaining bytes on page */
3243 Serge 327
	unsigned int		__flags;
328
};
329
 
330
/* Mapping-iterator API implemented in lib/scatterlist.c. */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags);
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
335
 
336
#endif /* _LINUX_SCATTERLIST_H */