Rev 6102 | Rev 6293 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 6102 | Rev 6125 | ||
---|---|---|---|
Line 130... | Line 130... | ||
/*
 * A workqueue: lists of pending work items protected by a spinlock.
 * Simplified port of the Linux workqueue interface.
 */
struct workqueue_struct {
	spinlock_t lock;			/* protects both lists below */
	struct list_head worklist;		/* work items ready to execute */
	struct list_head delayed_worklist;	/* NOTE(review): presumably
						 * delayed_work entries waiting
						 * for their timer — confirm
						 * against workqueue.c */
};
- | 135 | ||
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power consumption are marked with
	 * this flag; enabling the power_efficient mode leads to noticeable
	 * power saving at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};
|
- | 182 | ||
- | 183 | /* unbound wq's aren't per-cpu, scale max_active according to #cpus */ |
|
- | 184 | #define WQ_UNBOUND_MAX_ACTIVE \ |
|
- | 185 | max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) |
|
- | 186 | ||
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * NOTE(review): only system_wq is actually declared in this port; the
 * other system workqueues described above are upstream Linux context.
 */
extern struct workqueue_struct *system_wq;
Line 136... | Line 216... | ||
136 | 216 | ||
Line 137... | Line 217... | ||
/*
 * run_workqueue - process work items queued on @cwq.
 * NOTE(review): implementation not visible here; presumably drains
 * @cwq->worklist on the calling thread — confirm against workqueue.c.
 */
void run_workqueue(struct workqueue_struct *cwq);
138 | 218 | ||
Line -... | Line 219... | ||
/*
 * alloc_workqueue_key - allocate a workqueue.
 * NOTE(review): unlike mainline Linux, this variant takes no lockdep
 * key/name arguments — only @fmt, WQ_* @flags and @max_active.
 */
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
		unsigned int flags, int max_active);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)