/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track pages currently in use
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
//#include <linux/highmem.h>
//#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
//#include <linux/dma-mapping.h>

//#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

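/* NUM_PAGES_TO_ALLOC below is one page's worth of struct page pointers per
 * batch (e.g. 4096 / 8 = 512 pages with 4 KiB pages and 64-bit pointers).
 * SMALL_ALLOCATION is the request size below which the pool gets refilled,
 * and FREE_ALL_PAGES is the "drain everything" marker for ttm_page_pool_free(). */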
#define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION                16
#define FREE_ALL_PAGES                  (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL              1000

#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
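/* pr_err() is (re)defined locally here, presumably because this port does not
 * pull in the kernel's printk helpers. */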



#if 0
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
    spinlock_t          lock;
    bool                fill_lock;
        struct list_head        list;
    gfp_t               gfp_flags;
    unsigned            npages;
    char                *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */

struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code, so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there are
 * pages to free.
 * @small_allocation: Limit, in number of pages, below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        struct kobject          kobj;
        struct shrinker         mm_shrink;
        struct ttm_pool_opts    options;

        union {
                struct ttm_page_pool    pools[NUM_POOLS];
                struct {
                        struct ttm_page_pool    wc_pool;
                        struct ttm_page_pool    uc_pool;
                        struct ttm_page_pool    wc_pool_dma32;
                        struct ttm_page_pool    uc_pool_dma32;
                };
        };
};
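/* The anonymous union above lets the four pools be reached either by name
 * (wc_pool, uc_pool, wc_pool_dma32, uc_pool_dma32) or by index through
 * pools[NUM_POOLS], which is what the shrinker and ttm_get_pool() rely on. */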

static struct attribute ttm_page_pool_max = {
        .name = "pool_max_size",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
        .name = "pool_small_allocation",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
        .name = "pool_allocation_size",
        .mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
        &ttm_page_pool_max,
        &ttm_page_pool_small,
        &ttm_page_pool_alloc_size,
        NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
                struct attribute *attr, const char *buffer, size_t size)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
                struct attribute *attr, char *buffer)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
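/* The sysfs files above exchange values in KiB: ttm_pool_store() divides by
 * (PAGE_SIZE >> 10) to convert KiB to pages, and ttm_pool_show() multiplies
 * back for display. */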

static const struct sysfs_ops ttm_pool_sysfs_ops = {
        .show = &ttm_pool_show,
        .store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
        .release = &ttm_pool_kobj_release,
        .sysfs_ops = &ttm_pool_sysfs_ops,
        .default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                unmap_page_from_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}
#endif
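/* On non-x86 there is no arch-provided set_pages_array_*(), so the AGP
 * map/unmap hooks above stand in for switching pages between write-back and
 * uncached/write-combined states; on x86 the real implementations from the
 * arch code are used instead. */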

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}
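/* Pool index encoding used above: bit 0 selects the caching state (0 = wc,
 * 1 = uc) and bit 1 is set for DMA32 pools, matching the pools[]/union layout
 * in struct ttm_pool_manager. Cached pages have no pool, so NULL is returned. */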

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;
        if (set_pages_array_wb(pages, npages))
                pr_err("Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
                pr_err("Failed to allocate memory for pool free operation\n");
                return 0;
        }

restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        list_for_each_entry_reverse(p, &pool->list, lru) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        __list_del(p->lru.prev, &pool->list);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because
                         * following context is inside spinlock while we are
                         * outside here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                __list_del(&p->lru, &pool->list);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        kfree(pages_to_free);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
                              struct shrink_control *sc)
{
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}
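/* Note: this uses the old single-callback shrinker interface (a ->shrink hook
 * that receives a struct shrink_control and returns the remaining object
 * count), matching the kernel version this code was ported from; newer
 * kernels split it into count_objects/scan_objects. */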

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
        manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
        unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to wc!\n", cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change the caching state. If there are
 * any pages that have already changed their caching state, put them back
 * into the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        struct page *p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

        if (!caching_array) {
                pr_err("Unable to allocate table for new pages\n");
                return -ENOMEM;
        }

        for (i = 0, cpages = 0; i < count; ++i) {
                p = alloc_page(gfp_flags);

                if (!p) {
                        pr_err("Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such a case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                list_add(&p->lru, pages);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
                unsigned long *irq_flags)
{
        struct page *p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If the pool doesn't have enough pages for the allocation, new pages
         * are allocated from outside of the pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct list_head new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
                                cstate, alloc_size);
                spin_lock_irqsave(&pool->lock, *irq_flags);

                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        pr_err("Failed to fill pool (%p)\n", pool);
                        /* If we have any pages left put them into the pool.
                         * Count only the partially allocated new_pages. */
                        list_for_each_entry(p, &new_pages, lru) {
                                ++cpages;
                        }
                        list_splice(&new_pages, &pool->list);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                                        struct list_head *pages,
                                        int ttm_flags,
                                        enum ttm_caching_state cstate,
                                        unsigned count)
{
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;

        spin_lock_irqsave(&pool->lock, irq_flags);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                list_splice_init(&pool->list, pages);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        /* Find the last page to include for the requested number of pages.
         * Walk the list from whichever end is closer to halve the search
         * space. */
        if (count <= pool->npages/2) {
                i = 0;
                list_for_each(p, &pool->list) {
                        if (++i == count)
                                break;
                }
        } else {
                i = pool->npages + 1;
                list_for_each_prev(p, &pool->list) {
                        if (--i == count)
                                break;
                }
        }
        /* Cut 'count' number of pages from the pool */
        list_cut_position(pages, &pool->list, p);
        pool->npages -= count;
        count = 0;
out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return count;
}
#endif

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        unsigned long irq_flags;
//   struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;

    for (i = 0; i < npages; i++) {
        if (pages[i]) {
//            if (page_count(pages[i]) != 1)
//                pr_err("Erroneous page count. Leaking pages.\n");
            FreePage(pages[i]);
            pages[i] = NULL;
        }
    }
    return;

#if 0
        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                if (page_count(pages[i]) != 1)
                                        pr_err("Erroneous page count. Leaking pages.\n");
                                __free_page(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        spin_lock_irqsave(&pool->lock, irq_flags);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        if (page_count(pages[i]) != 1)
                                pr_err("Erroneous page count. Leaking pages.\n");
                        list_add_tail(&pages[i]->lru, &pool->list);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (npages)
                ttm_page_pool_free(pool, npages);
#endif

}
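/* In this KolibriOS port the page pools are compiled out (#if 0 above), so
 * ttm_put_pages() simply hands every page straight back to the system via
 * FreePage(), which appears to be the KolibriOS kernel page allocator. */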

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
//   struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct list_head plist;
        struct page *p = NULL;
//   gfp_t gfp_flags = GFP_USER;
        unsigned count;
        int r;

    for (r = 0; r < npages; ++r) {
        p = AllocPage();
        if (!p) {

            pr_err("Unable to allocate page\n");
            return -ENOMEM;
        }

        pages[r] = p;
    }
    return 0;

#if 0


        /* set zero flag for page allocation if required */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* No pool for cached pages */
        if (pool == NULL) {
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= GFP_HIGHUSER;

                for (r = 0; r < npages; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {

                                pr_err("Unable to allocate page\n");
                                return -ENOMEM;
                        }

                        pages[r] = p;
                }
                return 0;
        }

        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
        INIT_LIST_HEAD(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        list_for_each_entry(p, &plist, lru) {
                pages[count++] = p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, &plist, lru) {
                        if (PageHighMem(p))
                                clear_highpage(p);
                        else
                                clear_page(page_address(p));
                }
        }

        /* If the pool didn't have enough pages allocate new ones. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                INIT_LIST_HEAD(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
                list_for_each_entry(p, &plist, lru) {
                        pages[count++] = p;
                }
                if (r) {
                        /* If there are any pages in the list put them back
                         * into the pool. */
                        pr_err("Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }
#endif

        return 0;
}
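/* As with ttm_put_pages(), the pool path is disabled in this port: every
 * request is satisfied directly with AllocPage(), and no caching-state
 * handling (wc/uc) or zero-fill processing is performed here. */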

#if 0
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
{
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
        int ret;

        WARN_ON(_manager);

        pr_info("Initializing pool allocator\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

        ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

        ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "wc dma");

        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
                                   &glob->kobj, "pool");
        if (unlikely(ret != 0)) {
                kobject_put(&_manager->kobj);
                _manager = NULL;
                return ret;
        }

        ttm_pool_mm_shrink_init(_manager);

        return 0;
}

void ttm_page_alloc_fini(void)
{
        int i;

        pr_info("Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        kobject_put(&_manager->kobj);
        _manager = NULL;
}

#endif

int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
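        /* NOTE: mem_glob is not used below; the ttm_mem_global_alloc_page()
         * accounting call found in the upstream kernel version appears to have
         * been dropped in this port, while ttm_pool_unpopulate() still calls
         * ttm_mem_global_free_page(). */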
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

        }

        ttm->state = tt_unbound;
        return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
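
/* Illustrative sketch (not part of this file): a driver's ttm_tt backend
 * would typically forward its populate/unpopulate callbacks to this pool
 * allocator. The example_* names below are hypothetical.
 */
#if 0
static int example_ttm_tt_populate(struct ttm_tt *ttm)
{
        /* fills ttm->pages[] and moves the ttm to tt_unbound */
        return ttm_pool_populate(ttm);
}

static void example_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        /* returns the pages and moves the ttm back to tt_unpopulated */
        ttm_pool_unpopulate(ttm);
}
#endif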