/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void __stdcall delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
        WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
        WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
#else
        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
#endif

        WORK_STRUCT_COLOR_BITS  = 4,

        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
        WORK_STRUCT_PWQ         = 1 << WORK_STRUCT_PWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
#else
        WORK_STRUCT_STATIC      = 0,
#endif

        /*
         * The last color is "no color", used for work items which don't
         * participate in workqueue flushing.
         */
        WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS) - 1,
        WORK_NO_COLOR           = WORK_NR_COLORS,

        /* not bound to any CPU, prefer the local CPU */
        WORK_CPU_UNBOUND        = NR_CPUS,

        /*
         * Reserve 8 bits off of the pwq pointer w/ debugobjects turned
         * off.  This makes pwqs aligned to 256 bytes and allows 15
         * workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,

        /* data contains off-queue information when !WORK_STRUCT_PWQ */
        WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_COLOR_SHIFT,

        __WORK_OFFQ_CANCELING   = WORK_OFFQ_FLAG_BASE,
        WORK_OFFQ_CANCELING     = (1 << __WORK_OFFQ_CANCELING),

        /*
         * When a work item is off queue, its high bits point to the last
         * pool it was on.  Cap at 31 bits and use the highest number to
         * indicate that no pool is associated.
         */
        WORK_OFFQ_FLAG_BITS     = 1,
        WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
        WORK_OFFQ_LEFT          = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
        WORK_OFFQ_POOL_BITS     = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
        WORK_OFFQ_POOL_NONE     = (1LU << WORK_OFFQ_POOL_BITS) - 1,

        /* convenience constants */
        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
        WORK_STRUCT_NO_POOL     = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
        WORK_BUSY_RUNNING       = 1 << 1,

        /* maximum string length for set_worker_desc() */
        WORKER_DESC_LEN         = 24,
};
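
/*
 * Illustrative sketch (not part of this header): the flag bits above live
 * in the low bits of work->data, so they can be manipulated with the
 * generic bitops.  Claiming WORK_STRUCT_PENDING_BIT atomically is what
 * guarantees a work item is queued at most once; try_claim_pending() is a
 * hypothetical helper, not an API declared here.
 */
#if 0
static bool try_claim_pending(struct work_struct *work)
{
        /* false means somebody else already queued this work item */
        return !test_and_set_bit(WORK_STRUCT_PENDING_BIT,
                                 work_data_bits(work));
}
#endif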

struct work_struct {
        struct list_head entry;
        struct workqueue_struct *data;
        work_func_t func;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
        ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
        struct work_struct work;
        unsigned int delay;
        /* target workqueue and CPU ->timer uses to queue ->work */
        struct workqueue_struct *wq;
        int cpu;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}
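
/*
 * Usage sketch: a work function only receives the embedded work_struct;
 * when the item is part of a struct delayed_work, to_delayed_work()
 * recovers the container via container_of().  my_timeout_fn is a
 * hypothetical handler.
 */
#if 0
static void my_timeout_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ->wq and ->cpu record where the delayed item will be queued */
        (void)dwork->wq;
}
#endif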

struct execute_work {
        struct work_struct work;
};

struct workqueue_struct {
    spinlock_t lock;
    struct list_head worklist;
    struct list_head delayed_worklist;
};

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
        WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
        WQ_SYSFS                = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

        /*
         * Per-cpu workqueues are generally preferred because they tend to
         * show better performance thanks to cache locality.  Per-cpu
         * workqueues exclude the scheduler from choosing the CPU to
         * execute the worker threads, which has an unfortunate side effect
         * of increasing power consumption.
         *
         * The scheduler considers a CPU idle if it doesn't have any task
         * to execute and tries to keep idle cores idle to conserve power;
         * however, for example, a per-cpu work item scheduled from an
         * interrupt handler on an idle CPU will force the scheduler to
         * execute the work item on that CPU, breaking the idleness, which
         * in turn may lead to more scheduling choices which are sub-optimal
         * in terms of power consumption.
         *
         * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
         * but become unbound if the workqueue.power_efficient kernel param
         * is specified.  Per-cpu workqueues which are found to contribute
         * significantly to power consumption are marked with this flag,
         * and enabling the power_efficient mode leads to noticeable power
         * saving at the cost of a small performance disadvantage.
         *
         * http://thread.gmane.org/gmane.linux.kernel/1480396
         */
        WQ_POWER_EFFICIENT      = 1 << 7,

        __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
        __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */

        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE   \
        max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
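
/*
 * Usage sketch: allocating a dedicated workqueue with a combination of
 * the WQ_* flags above.  The name "my_drv" and the flag choice are
 * hypothetical; WQ_DFL_ACTIVE is passed as a reasonable max_active.
 */
#if 0
static struct workqueue_struct *my_wq;

static int my_wq_init(void)
{
        my_wq = alloc_workqueue("my_drv", WQ_UNBOUND | WQ_HIGHPRI,
                                WQ_DFL_ACTIVE);
        return my_wq ? 0 : -1;
}
#endif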

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;

void run_workqueue(struct workqueue_struct *cwq);

struct workqueue_struct *alloc_workqueue_key(const char *fmt,
                           unsigned int flags, int max_active);
struct workqueue_struct *alloc_workqueue(const char *fmt,
                           unsigned int flags,
                           int max_active);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)                    \
        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
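
/*
 * Usage sketch: an ordered workqueue runs its items one at a time in
 * FIFO order, which serializes the handlers without extra locking.
 * ring_wq, flip_work and flip_handler are hypothetical names.
 */
#if 0
static struct workqueue_struct *ring_wq;
static struct work_struct flip_work;

static void flip_handler(struct work_struct *work)
{
        /* runs strictly after any previously queued ring_wq item */
}

static void ring_init(void)
{
        ring_wq = alloc_ordered_workqueue("ring", 0);
        INIT_WORK(&flip_work, flip_handler);
        queue_work(ring_wq, &flip_work);
}
#endif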

bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);

bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);


#define INIT_WORK(_work, _func)                 \
    do {                                        \
        INIT_LIST_HEAD(&(_work)->entry);        \
        (_work)->func = _func;                  \
    } while (0)


#define INIT_DELAYED_WORK(_work, _func)         \
    do {                                        \
        INIT_LIST_HEAD(&(_work)->work.entry);   \
        (_work)->work.func = _func;             \
    } while (0)

static inline bool schedule_work(struct work_struct *work)
{
    return queue_work(system_wq, work);
}
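
/*
 * End-to-end usage sketch: initialize a one-shot work item and a
 * self-rearming delayed work item, then hand both to system_wq via the
 * schedule_*() helpers.  All names are hypothetical and the delay is in
 * the units queue_delayed_work() expects (jiffies upstream).
 */
#if 0
static struct work_struct probe_work;
static struct delayed_work poll_work;

static void probe_fn(struct work_struct *work)
{
        /* one-shot setup runs here, in worker context */
}

static void poll_fn(struct work_struct *work)
{
        /* re-arm so the poll repeats every 100 ticks */
        schedule_delayed_work(to_delayed_work(work), 100);
}

static void start(void)
{
        INIT_WORK(&probe_work, probe_fn);
        schedule_work(&probe_work);

        INIT_DELAYED_WORK(&poll_work, poll_fn);
        schedule_delayed_work(&poll_work, 100);
}
#endif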


#endif  /*  _LINUX_WORKQUEUE_H  */