seqlock.h: diff of Rev 5270 against Rev 6082
@@ -33,8 +33,9 @@
  */

 #include <linux/spinlock.h>
 //#include <linux/preempt.h>
+#include <linux/compiler.h>
 #include <linux/lockdep.h>
 #include <asm/processor.h>

 /*
@@ -106,10 +107,10 @@
 static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret;

 repeat:
-	ret = ACCESS_ONCE(s->sequence);
+	ret = READ_ONCE(s->sequence);
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
 	}
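The substantive change in this hunk (and in the two below) is ACCESS_ONCE() giving way to READ_ONCE(), the <linux/compiler.h> macro that presumably motivates the new include above; unlike ACCESS_ONCE(), it is also well-defined on non-scalar types. As a reminder of how this primitive is consumed, here is a minimal reader sketch; time_seq, time_ns and read_time_ns() are invented names, not part of this header:

/* Sketch: retry loop built on the seqcount read API from this file. */
static seqcount_t time_seq;
static u64 time_ns;		/* hypothetical payload guarded by time_seq */

static u64 read_time_ns(void)
{
	unsigned seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&time_seq);	/* spins while count is odd */
		val = time_ns;				/* speculative read */
	} while (read_seqcount_retry(&time_seq, seq));	/* redo if a writer ran */

	return val;
}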
@@ -125,10 +126,10 @@
  * seqcount without any lockdep checking and without checking or
  * masking the LSB. Calling code is responsible for handling that.
  */
 static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
-	unsigned ret = ACCESS_ONCE(s->sequence);
+	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
 	return ret;
 }

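As the comment says, raw_read_seqcount() neither checks lockdep nor masks the LSB, so its caller owns that logic. A minimal sketch of such a caller, assuming a helper name (try_snapshot) that does not exist in this header:

/* Returns false while an update is in flight (LSB of the count set). */
static bool try_snapshot(const seqcount_t *s, unsigned *seq)
{
	*seq = raw_read_seqcount(s);
	return !(*seq & 1);
}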
@@ -177,10 +178,10 @@
  * read_seqcount_retry() instead of stabilizing at the beginning of the
  * critical section.
  */
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = ACCESS_ONCE(s->sequence);
+	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
 	return ret & ~1;
 }

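Because raw_seqcount_begin() strips the LSB, the first pass through a retry loop proceeds even while a writer is active; the possibly torn result is then discarded by read_seqcount_retry(), so the loop stabilizes at the end rather than at the beginning. A sketch of that optimistic pattern, with an invented payload struct:

struct payload { u64 a, b; };	/* hypothetical data guarded by *s */

static void optimistic_copy(seqcount_t *s, struct payload *dst,
			    const struct payload *src)
{
	unsigned seq;

	do {
		seq = raw_seqcount_begin(s);	/* never waits for the writer */
		*dst = *src;			/* first copy may be torn */
	} while (read_seqcount_retry(s, seq));	/* loops until a stable copy */
}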
@@ -234,9 +235,82 @@
 }

 /*
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+ * interrupt the modification -- e.g. the concurrency is strictly between CPUs
+ * -- you most likely do not need this.
+ *
+ * Where the traditional RCU/lockless data structures rely on atomic
+ * modifications to ensure queries observe either the old or the new state the
+ * latch allows the same for non-atomic updates. The trade-off is doubling the
+ * cost of storage; we have to maintain two copies of the entire data
+ * structure.
+ *
+ * Very simply put: we first modify one copy and then the other. This ensures
+ * there is always one copy in a stable state, ready to give us an answer.
+ *
+ * The basic form is a data structure like:
+ *
+ * struct latch_struct {
+ *	seqcount_t		seq;
+ *	struct data_struct	data[2];
+ * };
+ *
+ * Where a modification, which is assumed to be externally serialized, does the
+ * following:
+ *
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ *	smp_wmb();	<- Ensure that the last data[1] update is visible
+ *	latch->seq++;
+ *	smp_wmb();	<- Ensure that the seqcount update is visible
+ *
+ *	modify(latch->data[0], ...);
+ *
+ *	smp_wmb();	<- Ensure that the data[0] update is visible
+ *	latch->seq++;
+ *	smp_wmb();	<- Ensure that the seqcount update is visible
+ *
+ *	modify(latch->data[1], ...);
+ * }
+ *
+ * The query will have a form like:
+ *
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ *	struct entry *entry;
+ *	unsigned seq, idx;
+ *
+ *	do {
+ *		seq = lockless_dereference(latch->seq);
+ *
+ *		idx = seq & 0x01;
+ *		entry = data_query(latch->data[idx], ...);
+ *
+ *		smp_rmb();
+ *	} while (seq != latch->seq);
+ *
+ *	return entry;
+ * }
+ *
+ * So during the modification, queries are first redirected to data[1]. Then we
+ * modify data[0]. When that is complete, we redirect queries back to data[0]
+ * and we can modify data[1].
+ *
+ * NOTE: The non-requirement for atomic modifications does _NOT_ include
+ *       the publishing of new entries in the case where data is a dynamic
+ *       data structure.
+ *
+ *       An iteration might start in data[0] and get suspended long enough
+ *       to miss an entire modification sequence, once it resumes it might
+ *       observe the new entry.
+ *
+ * NOTE: When data is a dynamic data structure; one should use regular RCU
+ *       patterns to manage the lifetimes of the objects within.
  */
 static inline void raw_write_seqcount_latch(seqcount_t *s)
 {
 	smp_wmb();	/* prior stores before incrementing "sequence" */
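The new comment stops at pseudocode, so here is a compilable toy built on the real raw_write_seqcount_latch(); struct latch_u64, latch_store() and latch_load() are invented for illustration, and the reader re-checks s->sequence directly, as the doc's query loop does, since this revision offers no dedicated latch read helper:

struct latch_u64 {
	seqcount_t	seq;
	u64		data[2];	/* two copies of the payload */
};

/* Writer, assumed externally serialized (e.g. by a spinlock). */
static void latch_store(struct latch_u64 *l, u64 val)
{
	raw_write_seqcount_latch(&l->seq);	/* count odd: readers -> data[1] */
	l->data[0] = val;
	raw_write_seqcount_latch(&l->seq);	/* count even: readers -> data[0] */
	l->data[1] = val;
}

/* Reader: the LSB picks whichever copy is currently stable. */
static u64 latch_load(struct latch_u64 *l)
{
	unsigned seq;
	u64 val;

	do {
		seq = raw_read_seqcount(&l->seq);	/* raw: LSB kept, not masked */
		val = l->data[seq & 1];
		smp_rmb();
	} while (seq != READ_ONCE(l->seq.sequence));

	return val;
}

Note that the writer never waits and the reader never blocks; the price is the doubled storage the comment describes.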
@@ -264,15 +338,15 @@
 	seqcount_release(&s->dep_map, 1, _RET_IP_);
 	raw_write_seqcount_end(s);
 }

 /**
- * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
  * @s: pointer to seqcount_t
  *
- * After write_seqcount_barrier, no read-side seq operations will complete
+ * After write_seqcount_invalidate, no read-side seq operations will complete
  * successfully and see data older than this.
  */
-static inline void write_seqcount_barrier(seqcount_t *s)
+static inline void write_seqcount_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence+=2;
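Unlike a write_seqcount_begin()/end() pair, the += 2 keeps the count even, so readers never spin on it; a reader that raced with the update simply fails read_seqcount_retry() and starts over, while the smp_wmb() orders the data stores before the bump. A usage sketch with invented names, the writer side again assumed externally serialized:

static seqcount_t cache_seq;
static int cached_val;		/* hypothetical data guarded by cache_seq */

static void cache_update(int v)
{
	cached_val = v;				/* in-place modification */
	write_seqcount_invalidate(&cache_seq);	/* in-flight readers must retry */
}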