#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H

#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>

#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>

struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};

/*
 * Notes on SG table design.
 *
 * Architectures must provide an unsigned long page_link field in the
 * scatterlist struct. We use that to place the page pointer AND encode
 * information about the sg table as well. The two lower bits are reserved
 * for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 *
 */

#define SG_MAGIC	0x87654321

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define sg_is_chain(sg)		((sg)->page_link & 0x01)
#define sg_is_last(sg)		((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~0x03))
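
/*
 * Illustrative sketch of the encoding (not part of the upstream header):
 * with the two low bits stolen, a 4-byte aligned struct page at 0xc0100000
 * that backs the final entry of a list is stored as
 * page_link == 0xc0100002 (bit 1 set), while a chain entry linking to a
 * following scatterlist array at 0xc0200000 stores
 * page_link == 0xc0200001 (bit 0 set).
 */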

/**
 * sg_assign_page - Assign a given page to an SG entry
 * @sg:		    SG entry
 * @page:	    The page
 *
 * Description:
 *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
 *   variant.
 *
 **/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & 0x3;

	/*
	 * In order for the low bit stealing approach to work, pages
	 * must be aligned at a 32-bit boundary as a minimum.
	 */
	BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
	BUG_ON(sg_is_chain(sg));
#endif
	sg->page_link = page_link | (unsigned long) page;
}

/**
 * sg_set_page - Set sg entry to point at given page
 * @sg:		 SG entry
 * @page:	 The page
 * @len:	 Length of data
 * @offset:	 Offset into page
 *
 * Description:
 *   Use this function to set an sg entry pointing at a page, never assign
 *   the page directly. We encode sg table information in the lower bits
 *   of the page pointer. See sg_page() for looking up the page belonging
 *   to an sg entry.
 *
 **/
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
			       unsigned int len, unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}
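
/*
 * Illustrative example (not part of the upstream header): describe the 512
 * bytes that start 64 bytes into an already allocated page 'pg'.
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_table(sg, 1);
 *	sg_set_page(&sg[0], pg, 512, 64);
 */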

static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
	BUG_ON(sg_is_chain(sg));
#endif
	return (struct page *)((sg)->page_link & ~0x3);
}

/**
 * sg_set_buf - Set sg entry to point at given data
 * @sg:		 SG entry
 * @buf:	 Data
 * @buflen:	 Data length
 *
 **/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
//                 unsigned int buflen)
//{
//   sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}

/*
 * Loop over each sg element, following the pointer to a new list if necessary
 */
#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
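
/*
 * Illustrative example (not part of the upstream header): walk every mapped
 * entry of a populated struct sg_table 'table' and sum the byte count.
 *
 *	struct scatterlist *sg;
 *	unsigned int i, total = 0;
 *
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		total += sg->length;
 */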

/**
 * sg_chain - Chain two sglists together
 * @prv:	First scatterlist
 * @prv_nents:	Number of entries in prv
 * @sgl:	Second scatterlist
 *
 * Description:
 *   Links @prv and @sgl together, to form a longer scatterlist.
 *
 **/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
			    struct scatterlist *sgl)
{
#ifndef ARCH_HAS_SG_CHAIN
	BUG();
#endif

	/*
	 * offset and length are unused for chain entry.  Clear them.
	 */
	prv[prv_nents - 1].offset = 0;
	prv[prv_nents - 1].length = 0;

	/*
	 * Set lowest bit to indicate a link pointer, and make sure to clear
	 * the termination bit if it happens to be set.
	 */
	prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
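
/*
 * Illustrative example (not part of the upstream header): append a 16-entry
 * list 'second' to an 8-entry list 'first'. The last slot of 'first' becomes
 * the link entry, so only its first 7 entries remain usable for data.
 *
 *	sg_chain(first, 8, second);
 */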

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:		 SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	/*
	 * Set termination bit, clear potential chain bit
	 */
	sg->page_link |= 0x02;
	sg->page_link &= ~0x01;
}
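
/*
 * Illustrative example (not part of the upstream header): terminate a
 * statically sized list after filling only its first two entries with
 * existing pages 'pg0' and 'pg1', so that sg_next() stops there.
 *
 *	struct scatterlist sg[4];
 *
 *	sg_init_table(sg, 4);
 *	sg_set_page(&sg[0], pg0, PAGE_SIZE, 0);
 *	sg_set_page(&sg[1], pg1, PAGE_SIZE, 0);
 *	sg_mark_end(&sg[1]);
 */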

/**
 * sg_unmark_end - Undo setting the end of the scatterlist
 * @sg:		 SG entry
 *
 * Description:
 *   Removes the termination marker from the given entry of the scatterlist.
 *
 **/
static inline void sg_unmark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	sg->page_link &= ~0x02;
}

/**
 * sg_phys - Return physical address of an sg entry
 * @sg:	     SG entry
 *
 * Description:
 *   This calls page_to_phys() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that it is legal to call page_to_phys()
 *   on the sg page.
 *
 **/
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}

/**
 * sg_virt - Return virtual address of an sg entry
 * @sg:      SG entry
 *
 * Description:
 *   This calls page_address() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that the sg page has a valid virtual
 *   mapping.
 *
 **/
//static inline void *sg_virt(struct scatterlist *sg)
//{
//   return page_address(sg_page(sg)) + sg->offset;
//}

int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);

typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
		     sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen);
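
/*
 * Illustrative sketch (not part of the upstream header): allocate a
 * two-entry table, point it at two existing pages 'pg0' and 'pg1', and
 * copy a source buffer 'src' into it. Error handling is trimmed.
 *
 *	struct sg_table table;
 *
 *	if (sg_alloc_table(&table, 2, GFP_KERNEL) == 0) {
 *		sg_set_page(&table.sgl[0], pg0, PAGE_SIZE, 0);
 *		sg_set_page(&table.sgl[1], pg1, PAGE_SIZE, 0);
 *		sg_copy_from_buffer(table.sgl, table.nents, src, 2 * PAGE_SIZE);
 *		sg_free_table(&table);
 *	}
 */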

/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC     (4*PAGE_SIZE / sizeof(struct scatterlist))

/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page.  On each successful iteration,
 * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
 * to get the current page and its dma address. @piter->sg will point to the
 * sg holding this page and @piter->sg_pgoffset to the page's page offset
 * within the sg. The iteration will stop either when a maximum number of sg
 * entries was reached or a terminating sg (sg_is_last(sg) == true) was reached.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* sg holding the page */
	unsigned int		sg_pgoffset;	/* page offset within the sg */

	/* these are internal states, keep away */
	unsigned int		__nents;	/* remaining sg entries */
	int			__pg_advance;	/* nr pages to advance at the
						 * next step */
};

bool __sg_page_iter_next(struct sg_page_iter *piter);
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset);
/**
 * sg_page_iter_page - get the current page held by the page iterator
 * @piter:	page iterator holding the page
 */
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}

/**
 * sg_page_iter_dma_address - get the dma address of the current page held by
 * the page iterator.
 * @piter:	page iterator holding the page
 */
static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
{
	return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
}

/**
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:	sglist to iterate over
 * @piter:	page iterator to hold current page, sg, sg_pgoffset
 * @nents:	maximum number of sg entries to iterate over
 * @pgoffset:	starting page offset
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)		   \
	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
	     __sg_page_iter_next(piter);)
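
/*
 * Illustrative example (not part of the upstream header): visit each page
 * backing a populated struct sg_table 'table', starting at page offset 0;
 * handle_page() is a hypothetical placeholder for per-page work.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(table->sgl, &piter, table->nents, 0)
 *		handle_page(sg_page_iter_page(&piter));
 */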

/*
 * Mapping sg iterator
 *
 * Iterates over sg entries mapping page-by-page.  On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr.  As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration.  It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */

#define SG_MITER_ATOMIC		(1 << 0)	 /* use kmap_atomic */
#define SG_MITER_TO_SG		(1 << 1)	/* flush back to phys on unmap */
#define SG_MITER_FROM_SG	(1 << 2)	/* nop */

struct sg_mapping_iter {
	/* the following three fields can be accessed directly */
	struct page		*page;		/* currently mapped page */
	void			*addr;		/* pointer to the mapped area */
	size_t			length;		/* length of the mapped area */
	size_t			consumed;	/* number of consumed bytes */
	struct sg_page_iter	piter;		/* page iterator */

	/* these are internal states, keep away */
	unsigned int		__offset;	/* offset within page */
	unsigned int		__remaining;	/* remaining bytes on page */
	unsigned int		__flags;
};

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags);
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
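
/*
 * Illustrative sketch (not part of the upstream header): zero every byte
 * described by an existing list given by 'sgl' and 'nents', using the
 * mapping iterator. SG_MITER_TO_SG asks for the data to be flushed back to
 * the sg pages on unmap.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */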

#endif /* _LINUX_SCATTERLIST_H */