Subversion Repositories: KolibriOS
Rev 5270 | Author: serge (most recent revision: 6082)

#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPU's in a system, one bit position per CPU number.  In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/bug.h>

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/**
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * You should only assume nr_cpu_ids bits of this mask are valid.  This is
 * a macro so it's const-correct.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

#if NR_CPUS == 1
#define nr_cpu_ids		1
#else
extern int nr_cpu_ids;
#endif

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits	nr_cpu_ids
#else
#define nr_cpumask_bits	NR_CPUS
#endif

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 *  that might possibly ever be plugged in at any time during the
 *  life of that system boot.  The cpu_present_mask is dynamic(*),
 *  representing which CPUs are currently plugged in.  And
 *  cpu_online_mask is the dynamic subset of cpu_present_mask,
 *  indicating those CPUs available for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_mask is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_masks are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 */

extern const struct cpumask *const cpu_possible_mask;
extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;

#if NR_CPUS > 1
#define num_online_cpus()	cpumask_weight(cpu_online_mask)
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)
#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
#else
#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U
#define cpu_online(cpu)		((cpu) == 0)
#define cpu_possible(cpu)	((cpu) == 0)
#define cpu_present(cpu)	((cpu) == 0)
#define cpu_active(cpu)		((cpu) == 0)
#endif
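
/*
 * Usage sketch (illustrative only): querying the masks through the
 * helpers above; setup_buffer_for() stands in for a hypothetical
 * driver function.
 *
 *	unsigned int cpu;
 *
 *	printk("%u of %u possible CPUs are online\n",
 *	       num_online_cpus(), num_possible_cpus());
 *
 *	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 *		if (cpu_online(cpu))
 *			setup_buffer_for(cpu);
 */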

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= nr_cpumask_bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
	return cpu;
}

#if NR_CPUS == 1
/* Uniprocessor.  Assume all masks are "1". */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return 0;
}

/* Valid inputs for n are -1 and 0. */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	return n+1;
}

static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	return n+1;
}

static inline unsigned int cpumask_next_and(int n,
					    const struct cpumask *srcp,
					    const struct cpumask *andp)
{
	return n+1;
}

/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
					   unsigned int cpu)
{
	return 1;
}

static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
{
	set_bit(0, cpumask_bits(dstp));

	return 0;
}

#define for_each_cpu(cpu, mask)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask)		\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_and(cpu, mask, and)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}

int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

/**
 * for_each_cpu_not - iterate over every cpu in a complemented mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the first cpumask pointer
 * @and: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask, &and);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask, and)				\
	for ((cpu) = -1;						\
		(cpu) = cpumask_next_and((cpu), (mask), (and)),		\
		(cpu) < nr_cpu_ids;)
#endif /* SMP */
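
/*
 * Usage sketch (illustrative only): walking masks with the iterators
 * above.  dev_mask is a hypothetical per-device affinity mask.
 *
 *	unsigned int cpu;
 *
 *	for_each_cpu(cpu, cpu_online_mask)
 *		printk("cpu%u is online\n", cpu);
 *
 *	for_each_cpu_and(cpu, cpu_online_mask, dev_mask)
 *		printk("cpu%u is online and belongs to the device\n", cpu);
 */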

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] =  1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in @cpumask, else returns 0
 *
 * No static inline type checking - see Subtlety (1) above.
 */
#define cpumask_test_cpu(cpu, cpumask) \
	test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_clear_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns 0, else returns 1
 */
static inline int cpumask_and(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
				       cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
				      cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
				       cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns 0, else returns 1
 */
static inline int cpumask_andnot(struct cpumask *dstp,
				  const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
					  cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_complement - *dstp = ~*srcp
 * @dstp: the cpumask result
 * @srcp: the input to invert
 */
static inline void cpumask_complement(struct cpumask *dstp,
				      const struct cpumask *srcp)
{
	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
					      nr_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
				const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
						 nr_cpumask_bits);
}

/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				     const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
						      nr_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
 */
static inline int cpumask_subset(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
						  nr_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear.
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set.
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}
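
/*
 * Usage sketch (illustrative only): combining the operations above to
 * count how many of the CPUs a caller cares about are currently online.
 * "wanted" and "both" are hypothetical caller-owned masks.
 *
 *	struct cpumask wanted, both;
 *
 *	cpumask_clear(&wanted);
 *	cpumask_set_cpu(0, &wanted);
 *	cpumask_set_cpu(2, &wanted);
 *
 *	if (cpumask_and(&both, &wanted, cpu_online_mask))
 *		printk("%u wanted CPUs online\n", cpumask_weight(&both));
 */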

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
					       nr_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
					      nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
 */
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (<= nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

/**
 * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
 * @buf: the buffer to sprintf into
 * @len: the length of the buffer
 * @srcp: the cpumask to print
 *
 * If len is zero, returns zero.  Otherwise returns the length of the
 * (nul-terminated) @buf string.
 */
static inline int cpumask_scnprintf(char *buf, int len,
				    const struct cpumask *srcp)
{
	return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
							nr_cpumask_bits);
}

/**
 * cpulist_scnprintf - print a cpumask into a string as comma-separated list
 * @buf: the buffer to sprintf into
 * @len: the length of the buffer
 * @srcp: the cpumask to print
 *
 * If len is zero, returns zero.  Otherwise returns the length of the
 * (nul-terminated) @buf string.
 */
static inline int cpulist_scnprintf(char *buf, int len,
				    const struct cpumask *srcp)
{
	return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
				    nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	char *nl = strchr(buf, '\n');
	unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);

	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpulist_parse - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
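
/*
 * Usage sketch (illustrative only): parsing a cpu list such as "0-3,8"
 * and printing it back as comma-separated hex.
 *
 *	struct cpumask m;
 *	char out[64];
 *
 *	if (cpulist_parse("0-3,8", &m) == 0) {
 *		cpumask_scnprintf(out, sizeof(out), &m);
 *		printk("mask: %s\n", out);
 *	}
 */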

/**
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
 *
 * This will eventually be a runtime variable, depending on nr_cpu_ids.
 */
static inline size_t cpumask_size(void)
{
	/* FIXME: Once all cpumask assignments are eliminated, this
	 * can be nr_cpumask_bits */
	return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
}
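
/*
 * Usage sketch (illustrative only): allocating a cpumask on the heap
 * with cpumask_size() rather than sizeof(struct cpumask).
 *
 *	struct cpumask *m = kmalloc(cpumask_size(), GFP_KERNEL);
 *
 *	if (m) {
 *		cpumask_copy(m, cpu_online_mask);
 *		...
 *		kfree(m);
 *	}
 */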

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play!  In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * ie.
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	  ... use 'tmpmask' like a normal struct cpumask * ...
 *
 *	free_cpumask_var(tmpmask);
 *
 *
 * There is one notable exception, however: alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (whereas a real cpumask_t always has
 * NR_CPUS bits).  Therefore you must not dereference a cpumask_var_t
 * and assign it by value:
 *
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	var = *tmpmask;
 *
 * This code performs an NR_CPUS-sized memcpy and can corrupt memory.
 * cpumask_copy() provides safe copy functionality.
 *
 * Note that there is another evil here: if you define a cpumask_var_t
 * as a percpu variable, then the way to obtain the address of the cpumask
 * structure depends on the configuration, which in turn determines which
 * this_cpu_* operation must be used.  Please use this_cpu_cpumask_var_ptr()
 * in those cases.  The direct use of this_cpu_ptr() or this_cpu_read()
 * will lead to failures when the other type of cpumask_var_t
 * implementation is configured.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
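
/*
 * Usage sketch (illustrative only): the safe allocate/copy/free pattern
 * from the comment block above, correct with either cpumask_var_t
 * implementation.
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(tmp, cpu_online_mask);	(not "var = *tmp")
 *	...
 *	free_cpumask_var(tmp);
 */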

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)

/* Wrappers for arch boot code to manipulate normally-constant masks */
void set_cpu_possible(unsigned int cpu, bool possible);
void set_cpu_present(unsigned int cpu, bool present);
void set_cpu_online(unsigned int cpu, bool online);
void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
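
/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64 and the usual
 * kernel definition of cpu_bit_bitmap, where row k+1 has bit k of word 0
 * set): for cpu 70, get_cpu_mask() picks row 1 + (70 % 64) = 7, then steps
 * the pointer back by 70 / 64 = 1 word.  Relative to the returned pointer
 * the set bit is at word 1, bit 6, i.e. bit 70, which is exactly the
 * single-CPU mask for cpu 70.
 */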

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL						\
{								\
	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD		\
}
#endif /* NR_CPUS > BITS_PER_LONG */

/**
 * cpumap_print_to_pagebuf  - copies the cpumask into the buffer either
 *	as comma-separated list of cpus or hex values of cpumask
 * @list: whether to format the cpumask as a list of cpus (true) or as hex (false)
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Returns the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				      nr_cpumask_bits);
}

/*
 *
 * From here down, all obsolete.  Use cpumask_ variants!
 *
 */
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))

#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
} }

#else

#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
} }

#endif

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] =  1UL							\
} }

#if NR_CPUS == 1
#define first_cpu(src)		({ (void)(src); 0; })
#define next_cpu(n, src)	({ (void)(src); 1; })
#define any_online_cpu(mask)	0
#define for_each_cpu_mask(cpu, mask)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);

#define first_cpu(src)		__first_cpu(&(src))
#define next_cpu(n, src)	__next_cpu((n), &(src))
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
#define for_each_cpu_mask(cpu, mask)			\
	for ((cpu) = -1;				\
		(cpu) = next_cpu((cpu), (mask)),	\
		(cpu) < NR_CPUS; )
#endif /* SMP */

#if NR_CPUS <= 64

#define for_each_cpu_mask_nr(cpu, mask)	for_each_cpu_mask(cpu, mask)

#else /* NR_CPUS > 64 */

int __next_cpu_nr(int n, const cpumask_t *srcp);
#define for_each_cpu_mask_nr(cpu, mask)			\
	for ((cpu) = -1;				\
		(cpu) = __next_cpu_nr((cpu), &(mask)),	\
		(cpu) < nr_cpu_ids; )

#endif /* NR_CPUS > 64 */

#define cpus_addr(src) ((src).bits)

#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
	set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
	clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
	return test_and_set_bit(cpu, addr->bits);
}

#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
				__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}

#define cpus_shift_left(dst, src, n) \
			__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
					const cpumask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */

#endif /* __LINUX_CPUMASK_H */