Subversion Repositories Kolibri OS

Rev

Rev 6082 | Rev 6936 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
5270 serge 1
#ifndef __LINUX_CPUMASK_H
2
#define __LINUX_CPUMASK_H
3
 
4
/*
5
 * Cpumasks provide a bitmap suitable for representing the
6
 * set of CPU's in a system, one bit position per CPU number.  In general,
7
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
8
 */
9
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
13
 
6082 serge 14
/* Don't assign or return these: may not be this big! */
5270 serge 15
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
16
 
17
/**
18
 * cpumask_bits - get the bits in a cpumask
19
 * @maskp: the struct cpumask *
20
 *
21
 * You should only assume nr_cpu_ids bits of this mask are valid.  This is
22
 * a macro so it's const-correct.
23
 */
24
#define cpumask_bits(maskp) ((maskp)->bits)
25
 
6082 serge 26
/**
27
 * cpumask_pr_args - printf args to output a cpumask
28
 * @maskp: cpumask to be printed
29
 *
30
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
31
 */
32
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
33
 
5270 serge 34
#if NR_CPUS == 1
35
#define nr_cpu_ids		1
36
#else
37
extern int nr_cpu_ids;
38
#endif
39
 
40
#ifdef CONFIG_CPUMASK_OFFSTACK
41
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
42
 * not all bits may be allocated. */
43
#define nr_cpumask_bits	nr_cpu_ids
44
#else
45
#define nr_cpumask_bits	NR_CPUS
46
#endif
47
 
48
/*
49
 * The following particular system cpumasks and operations manage
50
 * possible, present, active and online cpus.
51
 *
52
 *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
53
 *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
54
 *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
55
 *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
56
 *
57
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
58
 *
59
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU id's
60
 *  that it is possible might ever be plugged in at anytime during the
61
 *  life of that system boot.  The cpu_present_mask is dynamic(*),
62
 *  representing which CPUs are currently plugged in.  And
63
 *  cpu_online_mask is the dynamic subset of cpu_present_mask,
64
 *  indicating those CPUs available for scheduling.
65
 *
66
 *  If HOTPLUG is enabled, then cpu_possible_mask is forced to have
67
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
68
 *  ACPI reports present at boot.
69
 *
70
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
71
 *  depending on what ACPI reports as currently plugged in, otherwise
72
 *  cpu_present_mask is just a copy of cpu_possible_mask.
73
 *
74
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
75
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
76
 *
77
 * Subtleties:
78
 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
79
 *    assumption that their single CPU is online.  The UP
80
 *    cpu_{online,possible,present}_masks are placebos.  Changing them
81
 *    will have no useful affect on the following num_*_cpus()
82
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
83
 *    optimization - don't waste any instructions or memory references
84
 *    asking if you're online or how many CPUs there are if there is
85
 *    only one CPU.
86
 */
87
 
88
extern const struct cpumask *const cpu_possible_mask;
89
extern const struct cpumask *const cpu_online_mask;
90
extern const struct cpumask *const cpu_present_mask;
91
extern const struct cpumask *const cpu_active_mask;
92
 
93
#if NR_CPUS > 1
94
#define num_online_cpus()	cpumask_weight(cpu_online_mask)
95
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
96
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
97
#define num_active_cpus()	cpumask_weight(cpu_active_mask)
98
#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
99
#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
100
#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
101
#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
102
#else
103
#define num_online_cpus()	1U
104
#define num_possible_cpus()	1U
105
#define num_present_cpus()	1U
106
#define num_active_cpus()	1U
107
#define cpu_online(cpu)		((cpu) == 0)
108
#define cpu_possible(cpu)	((cpu) == 0)
109
#define cpu_present(cpu)	((cpu) == 0)
110
#define cpu_active(cpu)		((cpu) == 0)
111
#endif
112
 
113
/* verify cpu argument to cpumask_* operators */
114
static inline unsigned int cpumask_check(unsigned int cpu)
115
{
116
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
117
	WARN_ON_ONCE(cpu >= nr_cpumask_bits);
118
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
119
	return cpu;
120
}
121
 
122
#if NR_CPUS == 1
123
/* Uniprocessor.  Assume all masks are "1". */
124
static inline unsigned int cpumask_first(const struct cpumask *srcp)
125
{
126
	return 0;
127
}
128
 
129
/* Valid inputs for n are -1 and 0. */
130
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
131
{
132
	return n+1;
133
}
134
 
135
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
136
{
137
	return n+1;
138
}
139
 
140
static inline unsigned int cpumask_next_and(int n,
141
					    const struct cpumask *srcp,
142
					    const struct cpumask *andp)
143
{
144
	return n+1;
145
}
146
 
147
/* cpu must be a valid cpu, ie 0, so there's no other choice. */
148
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
149
					   unsigned int cpu)
150
{
151
	return 1;
152
}
153
 
6082 serge 154
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
5270 serge 155
{
156
	return 0;
157
}
158
 
159
#define for_each_cpu(cpu, mask)			\
160
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
161
#define for_each_cpu_not(cpu, mask)		\
162
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
163
#define for_each_cpu_and(cpu, mask, and)	\
164
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
165
#else
166
/**
167
 * cpumask_first - get the first cpu in a cpumask
168
 * @srcp: the cpumask pointer
169
 *
170
 * Returns >= nr_cpu_ids if no cpus set.
171
 */
172
static inline unsigned int cpumask_first(const struct cpumask *srcp)
173
{
174
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
175
}
176
 
177
/**
178
 * cpumask_next - get the next cpu in a cpumask
179
 * @n: the cpu prior to the place to search (ie. return will be > @n)
180
 * @srcp: the cpumask pointer
181
 *
182
 * Returns >= nr_cpu_ids if no further cpus set.
183
 */
184
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
185
{
186
	/* -1 is a legal arg here. */
187
	if (n != -1)
188
		cpumask_check(n);
189
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
190
}
191
 
192
/**
193
 * cpumask_next_zero - get the next unset cpu in a cpumask
194
 * @n: the cpu prior to the place to search (ie. return will be > @n)
195
 * @srcp: the cpumask pointer
196
 *
197
 * Returns >= nr_cpu_ids if no further cpus unset.
198
 */
199
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
200
{
201
	/* -1 is a legal arg here. */
202
	if (n != -1)
203
		cpumask_check(n);
204
	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
205
}
206
 
207
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
208
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
6082 serge 209
unsigned int cpumask_local_spread(unsigned int i, int node);
5270 serge 210
 
211
/**
212
 * for_each_cpu - iterate over every cpu in a mask
213
 * @cpu: the (optionally unsigned) integer iterator
214
 * @mask: the cpumask pointer
215
 *
216
 * After the loop, cpu is >= nr_cpu_ids.
217
 */
218
#define for_each_cpu(cpu, mask)				\
219
	for ((cpu) = -1;				\
220
		(cpu) = cpumask_next((cpu), (mask)),	\
221
		(cpu) < nr_cpu_ids;)
222
 
223
/**
224
 * for_each_cpu_not - iterate over every cpu in a complemented mask
225
 * @cpu: the (optionally unsigned) integer iterator
226
 * @mask: the cpumask pointer
227
 *
228
 * After the loop, cpu is >= nr_cpu_ids.
229
 */
230
#define for_each_cpu_not(cpu, mask)				\
231
	for ((cpu) = -1;					\
232
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
233
		(cpu) < nr_cpu_ids;)
234
 
235
/**
236
 * for_each_cpu_and - iterate over every cpu in both masks
237
 * @cpu: the (optionally unsigned) integer iterator
238
 * @mask: the first cpumask pointer
239
 * @and: the second cpumask pointer
240
 *
241
 * This saves a temporary CPU mask in many places.  It is equivalent to:
242
 *	struct cpumask tmp;
243
 *	cpumask_and(&tmp, &mask, &and);
244
 *	for_each_cpu(cpu, &tmp)
245
 *		...
246
 *
247
 * After the loop, cpu is >= nr_cpu_ids.
248
 */
249
#define for_each_cpu_and(cpu, mask, and)				\
250
	for ((cpu) = -1;						\
251
		(cpu) = cpumask_next_and((cpu), (mask), (and)),		\
252
		(cpu) < nr_cpu_ids;)
253
#endif /* SMP */
254
 
255
#define CPU_BITS_NONE						\
256
{								\
257
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
258
}
259
 
260
#define CPU_BITS_CPU0						\
261
{								\
262
	[0] =  1UL						\
263
}
264
 
265
/**
266
 * cpumask_set_cpu - set a cpu in a cpumask
267
 * @cpu: cpu number (< nr_cpu_ids)
268
 * @dstp: the cpumask pointer
269
 */
270
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
271
{
272
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
273
}
274
 
275
/**
276
 * cpumask_clear_cpu - clear a cpu in a cpumask
277
 * @cpu: cpu number (< nr_cpu_ids)
278
 * @dstp: the cpumask pointer
279
 */
280
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
281
{
282
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
283
}
284
 
285
/**
286
 * cpumask_test_cpu - test for a cpu in a cpumask
287
 * @cpu: cpu number (< nr_cpu_ids)
288
 * @cpumask: the cpumask pointer
289
 *
290
 * Returns 1 if @cpu is set in @cpumask, else returns 0
291
 */
6082 serge 292
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
293
{
294
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
295
}
5270 serge 296
 
297
/**
298
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
299
 * @cpu: cpu number (< nr_cpu_ids)
300
 * @cpumask: the cpumask pointer
301
 *
302
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
303
 *
304
 * test_and_set_bit wrapper for cpumasks.
305
 */
306
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
307
{
308
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
309
}
310
 
311
/**
312
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
313
 * @cpu: cpu number (< nr_cpu_ids)
314
 * @cpumask: the cpumask pointer
315
 *
316
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
317
 *
318
 * test_and_clear_bit wrapper for cpumasks.
319
 */
320
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
321
{
322
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
323
}
324
 
325
/**
326
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
327
 * @dstp: the cpumask pointer
328
 */
329
static inline void cpumask_setall(struct cpumask *dstp)
330
{
331
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
332
}
333
 
334
/**
335
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
336
 * @dstp: the cpumask pointer
337
 */
338
static inline void cpumask_clear(struct cpumask *dstp)
339
{
340
	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
341
}
342
 
343
/**
344
 * cpumask_and - *dstp = *src1p & *src2p
345
 * @dstp: the cpumask result
346
 * @src1p: the first input
347
 * @src2p: the second input
348
 *
349
 * If *@dstp is empty, returns 0, else returns 1
350
 */
351
static inline int cpumask_and(struct cpumask *dstp,
352
			       const struct cpumask *src1p,
353
			       const struct cpumask *src2p)
354
{
355
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
356
				       cpumask_bits(src2p), nr_cpumask_bits);
357
}
358
 
359
/**
360
 * cpumask_or - *dstp = *src1p | *src2p
361
 * @dstp: the cpumask result
362
 * @src1p: the first input
363
 * @src2p: the second input
364
 */
365
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
366
			      const struct cpumask *src2p)
367
{
368
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
369
				      cpumask_bits(src2p), nr_cpumask_bits);
370
}
371
 
372
/**
373
 * cpumask_xor - *dstp = *src1p ^ *src2p
374
 * @dstp: the cpumask result
375
 * @src1p: the first input
376
 * @src2p: the second input
377
 */
378
static inline void cpumask_xor(struct cpumask *dstp,
379
			       const struct cpumask *src1p,
380
			       const struct cpumask *src2p)
381
{
382
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
383
				       cpumask_bits(src2p), nr_cpumask_bits);
384
}
385
 
386
/**
387
 * cpumask_andnot - *dstp = *src1p & ~*src2p
388
 * @dstp: the cpumask result
389
 * @src1p: the first input
390
 * @src2p: the second input
391
 *
392
 * If *@dstp is empty, returns 0, else returns 1
393
 */
394
static inline int cpumask_andnot(struct cpumask *dstp,
395
				  const struct cpumask *src1p,
396
				  const struct cpumask *src2p)
397
{
398
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
399
					  cpumask_bits(src2p), nr_cpumask_bits);
400
}
401
 
402
/**
403
 * cpumask_complement - *dstp = ~*srcp
404
 * @dstp: the cpumask result
405
 * @srcp: the input to invert
406
 */
407
static inline void cpumask_complement(struct cpumask *dstp,
408
				      const struct cpumask *srcp)
409
{
410
	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
411
					      nr_cpumask_bits);
412
}
413
 
414
/**
415
 * cpumask_equal - *src1p == *src2p
416
 * @src1p: the first input
417
 * @src2p: the second input
418
 */
419
static inline bool cpumask_equal(const struct cpumask *src1p,
420
				const struct cpumask *src2p)
421
{
422
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
423
						 nr_cpumask_bits);
424
}
425
 
426
/**
427
 * cpumask_intersects - (*src1p & *src2p) != 0
428
 * @src1p: the first input
429
 * @src2p: the second input
430
 */
431
static inline bool cpumask_intersects(const struct cpumask *src1p,
432
				     const struct cpumask *src2p)
433
{
434
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
435
						      nr_cpumask_bits);
436
}
437
 
438
/**
439
 * cpumask_subset - (*src1p & ~*src2p) == 0
440
 * @src1p: the first input
441
 * @src2p: the second input
442
 *
443
 * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
444
 */
445
static inline int cpumask_subset(const struct cpumask *src1p,
446
				 const struct cpumask *src2p)
447
{
448
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
449
						  nr_cpumask_bits);
450
}
451
 
452
/**
453
 * cpumask_empty - *srcp == 0
454
 * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
455
 */
456
static inline bool cpumask_empty(const struct cpumask *srcp)
457
{
458
	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
459
}
460
 
461
/**
462
 * cpumask_full - *srcp == 0xFFFFFFFF...
463
 * @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
464
 */
465
static inline bool cpumask_full(const struct cpumask *srcp)
466
{
467
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
468
}
469
 
470
/**
471
 * cpumask_weight - Count of bits in *srcp
472
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
473
 */
474
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
475
{
476
	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
477
}
478
 
479
/**
480
 * cpumask_shift_right - *dstp = *srcp >> n
481
 * @dstp: the cpumask result
482
 * @srcp: the input to shift
483
 * @n: the number of bits to shift by
484
 */
485
static inline void cpumask_shift_right(struct cpumask *dstp,
486
				       const struct cpumask *srcp, int n)
487
{
488
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
489
					       nr_cpumask_bits);
490
}
491
 
492
/**
493
 * cpumask_shift_left - *dstp = *srcp << n
494
 * @dstp: the cpumask result
495
 * @srcp: the input to shift
496
 * @n: the number of bits to shift by
497
 */
498
static inline void cpumask_shift_left(struct cpumask *dstp,
499
				      const struct cpumask *srcp, int n)
500
{
501
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
502
					      nr_cpumask_bits);
503
}
504
 
505
/**
506
 * cpumask_copy - *dstp = *srcp
507
 * @dstp: the result
508
 * @srcp: the input cpumask
509
 */
510
static inline void cpumask_copy(struct cpumask *dstp,
511
				const struct cpumask *srcp)
512
{
513
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
514
}
515
 
516
/**
517
 * cpumask_any - pick a "random" cpu from *srcp
518
 * @srcp: the input cpumask
519
 *
520
 * Returns >= nr_cpu_ids if no cpus set.
521
 */
522
#define cpumask_any(srcp) cpumask_first(srcp)
523
 
524
/**
525
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
526
 * @src1p: the first input
527
 * @src2p: the second input
528
 *
529
 * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
530
 */
531
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
532
 
533
/**
534
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
535
 * @mask1: the first input cpumask
536
 * @mask2: the second input cpumask
537
 *
538
 * Returns >= nr_cpu_ids if no cpus set.
539
 */
540
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
541
 
542
/**
543
 * cpumask_of - the cpumask containing just a given cpu
544
 * @cpu: the cpu (<= nr_cpu_ids)
545
 */
546
#define cpumask_of(cpu) (get_cpu_mask(cpu))
547
 
548
/**
549
 * cpumask_parse_user - extract a cpumask from a user string
550
 * @buf: the buffer to extract from
551
 * @len: the length of the buffer
552
 * @dstp: the cpumask to set.
553
 *
554
 * Returns -errno, or 0 for success.
555
 */
556
static inline int cpumask_parse_user(const char __user *buf, int len,
557
				     struct cpumask *dstp)
558
{
6934 serge 559
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
5270 serge 560
}
561
 
562
/**
563
 * cpumask_parselist_user - extract a cpumask from a user string
564
 * @buf: the buffer to extract from
565
 * @len: the length of the buffer
566
 * @dstp: the cpumask to set.
567
 *
568
 * Returns -errno, or 0 for success.
569
 */
570
static inline int cpumask_parselist_user(const char __user *buf, int len,
571
				     struct cpumask *dstp)
572
{
573
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
6934 serge 574
				     nr_cpumask_bits);
5270 serge 575
}
576
 
577
/**
578
 * cpumask_parse - extract a cpumask from from a string
579
 * @buf: the buffer to extract from
580
 * @dstp: the cpumask to set.
581
 *
582
 * Returns -errno, or 0 for success.
583
 */
584
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
585
{
586
	char *nl = strchr(buf, '\n');
587
	unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
588
 
6934 serge 589
	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
5270 serge 590
}
591
 
592
/**
593
 * cpulist_parse - extract a cpumask from a user string of ranges
594
 * @buf: the buffer to extract from
595
 * @dstp: the cpumask to set.
596
 *
597
 * Returns -errno, or 0 for success.
598
 */
599
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
600
{
6934 serge 601
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
5270 serge 602
}
603
 
604
/**
605
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
606
 *
607
 * This will eventually be a runtime variable, depending on nr_cpu_ids.
608
 */
609
static inline size_t cpumask_size(void)
610
{
6082 serge 611
	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
5270 serge 612
}
613
 
614
/*
615
 * cpumask_var_t: struct cpumask for stack usage.
616
 *
617
 * Oh, the wicked games we play!  In order to make kernel coding a
618
 * little more difficult, we typedef cpumask_var_t to an array or a
619
 * pointer: doing &mask on an array is a noop, so it still works.
620
 *
621
 * ie.
622
 *	cpumask_var_t tmpmask;
623
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
624
 *		return -ENOMEM;
625
 *
626
 *	  ... use 'tmpmask' like a normal struct cpumask * ...
627
 *
628
 *	free_cpumask_var(tmpmask);
629
 *
630
 *
631
 * However, one notable exception is there. alloc_cpumask_var() allocates
632
 * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
633
 * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
634
 *
635
 *	cpumask_var_t tmpmask;
636
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
637
 *		return -ENOMEM;
638
 *
639
 *	var = *tmpmask;
640
 *
641
 * This code makes NR_CPUS length memcopy and brings to a memory corruption.
642
 * cpumask_copy() provide safe copy functionality.
643
 *
644
 * Note that there is another evil here: If you define a cpumask_var_t
645
 * as a percpu variable then the way to obtain the address of the cpumask
646
 * structure differently influences what this_cpu_* operation needs to be
647
 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
648
 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
649
 * other type of cpumask_var_t implementation is configured.
650
 */
651
#ifdef CONFIG_CPUMASK_OFFSTACK
652
typedef struct cpumask *cpumask_var_t;
653
 
654
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
655
 
656
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
657
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
658
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
659
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
660
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
661
void free_cpumask_var(cpumask_var_t mask);
662
void free_bootmem_cpumask_var(cpumask_var_t mask);
663
 
664
#else
665
typedef struct cpumask cpumask_var_t[1];
666
 
667
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
668
 
669
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
670
{
671
	return true;
672
}
673
 
674
static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
675
					  int node)
676
{
677
	return true;
678
}
679
 
680
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
681
{
682
	cpumask_clear(*mask);
683
	return true;
684
}
685
 
686
static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
687
					  int node)
688
{
689
	cpumask_clear(*mask);
690
	return true;
691
}
692
 
693
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
694
{
695
}
696
 
697
static inline void free_cpumask_var(cpumask_var_t mask)
698
{
699
}
700
 
701
static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
702
{
703
}
704
#endif /* CONFIG_CPUMASK_OFFSTACK */
705
 
706
/* It's common to want to use cpu_all_mask in struct member initializers,
707
 * so it has to refer to an address rather than a pointer. */
708
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
709
#define cpu_all_mask to_cpumask(cpu_all_bits)
710
 
711
/* First bits of cpu_bit_bitmap are in fact unset. */
712
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
713
 
714
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
715
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
716
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
717
 
718
/* Wrappers for arch boot code to manipulate normally-constant masks */
719
void set_cpu_possible(unsigned int cpu, bool possible);
720
void set_cpu_present(unsigned int cpu, bool present);
721
void set_cpu_online(unsigned int cpu, bool online);
722
void set_cpu_active(unsigned int cpu, bool active);
723
void init_cpu_present(const struct cpumask *src);
724
void init_cpu_possible(const struct cpumask *src);
725
void init_cpu_online(const struct cpumask *src);
726
 
727
/**
728
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
729
 * @bitmap: the bitmap
730
 *
731
 * There are a few places where cpumask_var_t isn't appropriate and
732
 * static cpumasks must be used (eg. very early boot), yet we don't
733
 * expose the definition of 'struct cpumask'.
734
 *
735
 * This does the conversion, and can be used as a constant initializer.
736
 */
737
#define to_cpumask(bitmap)						\
738
	((struct cpumask *)(1 ? (bitmap)				\
739
			    : (void *)sizeof(__check_is_bitmap(bitmap))))
740
 
741
static inline int __check_is_bitmap(const unsigned long *bitmap)
742
{
743
	return 1;
744
}
745
 
746
/*
747
 * Special-case data structure for "single bit set only" constant CPU masks.
748
 *
749
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
750
 * padding to the left and the right, and return the constant pointer
751
 * appropriately offset.
752
 */
753
extern const unsigned long
754
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
755
 
756
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
757
{
758
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
759
	p -= cpu / BITS_PER_LONG;
760
	return to_cpumask(p);
761
}
762
 
763
#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
764
 
765
#if NR_CPUS <= BITS_PER_LONG
766
#define CPU_BITS_ALL						\
767
{								\
6082 serge 768
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
5270 serge 769
}
770
 
771
#else /* NR_CPUS > BITS_PER_LONG */
772
 
773
#define CPU_BITS_ALL						\
774
{								\
775
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
6082 serge 776
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
5270 serge 777
}
778
#endif /* NR_CPUS > BITS_PER_LONG */
779
 
780
/**
781
 * cpumap_print_to_pagebuf  - copies the cpumask into the buffer either
782
 *	as comma-separated list of cpus or hex values of cpumask
783
 * @list: indicates whether the cpumap must be list
784
 * @mask: the cpumask to copy
785
 * @buf: the buffer to copy into
786
 *
787
 * Returns the length of the (null-terminated) @buf string, zero if
788
 * nothing is copied.
789
 */
790
static inline ssize_t
791
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
792
{
793
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
6082 serge 794
				      nr_cpu_ids);
5270 serge 795
}
796
 
797
#if NR_CPUS <= BITS_PER_LONG
798
#define CPU_MASK_ALL							\
799
(cpumask_t) { {								\
6082 serge 800
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
5270 serge 801
} }
802
#else
803
#define CPU_MASK_ALL							\
804
(cpumask_t) { {								\
805
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
6082 serge 806
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
5270 serge 807
} }
6082 serge 808
#endif /* NR_CPUS > BITS_PER_LONG */
5270 serge 809
 
810
#define CPU_MASK_NONE							\
811
(cpumask_t) { {								\
812
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
813
} }
814
 
815
#define CPU_MASK_CPU0							\
816
(cpumask_t) { {								\
817
	[0] =  1UL							\
818
} }
819
 
820
#endif /* __LINUX_CPUMASK_H */