#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>


extern zone_t z_core;


static LIST_INITIALIZE(slab_cache_list);

static slab_cache_t *slab_cache;

static slab_cache_t slab_cache_cache;

static slab_t *slab_create(void);

static slab_cache_t *slab_cache_alloc(void);

/**
 * Allocate frames for slab space and initialize them
 *
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    data = (void*)PA2KA(frame_alloc(1 << cache->order));
    if (!data) {
        return NULL;
    }
    slab = (slab_t*)slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Fill in slab structures */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Build the embedded free list: each free object stores the
       address of the next one in its first word */
    for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
    {
        *(addr_t *)p = p + cache->size;
        p = p + cache->size;
    }
    atomic_inc(&cache->allocated_slabs);
    return slab;
}
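
/*
 * Note on the free list above: the free objects themselves act as the list
 * nodes, so a slab needs no per-object metadata while an object is free.
 * The pointer written into the last object points just past the slab, but
 * it is never followed because slab->available reaches zero first.
 *
 * Worked example (illustrative numbers, assuming a 4096-byte page): with
 * cache->size == 32 and cache->order == 0 a slab spans one page and holds
 * 4096 / 32 == 128 objects.
 */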

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        slab = slab_space_alloc(cache, flags);
        if (!slab)
        {
            spinlock_unlock(&cache->slablock);
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }

    obj = slab->nextavail;
    slab->nextavail = *(void**)obj;
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
    return obj;
}


/** Map an object to its slab structure */
static slab_t *obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}
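
/*
 * obj2slab() works because slab_space_alloc() registered every physical
 * frame of a slab as a child of its slab_t via frame_set_parent().
 * Freeing therefore needs only the object address: the frame it lies in
 * leads straight back to the owning slab, and slab->cache to the cache.
 */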

/** Allocate a new object from the cache; if no flags are given, this
    always returns memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
    eflags_t efl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    efl = safe_cli();

//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//    result = magazine_obj_get(cache);
//  }
//  if (!result)
    result = slab_obj_create(cache, flags);

    safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

    return result;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit in a slab of the cache's size */
static unsigned int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static unsigned int badness(slab_cache_t *cache)
{
    unsigned int objects;
    unsigned int ssize;
    size_t val;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    val = ssize - objects * cache->size;
    return val;
}
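
/*
 * Worked example (illustrative, assuming a 4096-byte page): for an
 * external-slab cache with size == 96 and order == 0,
 * comp_objects() == 4096 / 96 == 42 and
 * badness() == 4096 - 42 * 96 == 64 wasted bytes.
 * With SLAB_CACHE_SLINSIDE the slab_t header lives inside the same page,
 * so sizeof(slab_t) is subtracted from the usable space before both
 * computations.
 */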


/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
//  ipl_t ipl;

//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

    cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

    /* Compute slab sizes, object counts in slabs, etc. */

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need 2^order >= pages */
    if (pages <= 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages-1) + 1;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);

    /* Add the cache to the cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}
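
/*
 * Order-selection example (illustrative, assuming 4096-byte frames): for
 * size == 6000, SIZE2FRAMES() gives 2 pages, so the initial order is
 * fnzb(1) + 1 == 1, i.e. a 2-page (8 KiB) slab.  The while loop above then
 * keeps doubling the slab until the wasted space reported by badness() is
 * no longer above SLAB_MAX_BADNESS(cache).
 */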

/** Create a slab cache */
slab_cache_t * slab_cache_create(
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    DBG("%s\n", __FUNCTION__);

    cache = (slab_cache_t*)slab_cache_alloc();
    if (!cache)                 /* guard against allocation failure */
        return NULL;

    _slab_cache_create(cache, size, align, constructor, destructor, flags);

    return cache;
}

/**
 * Deallocate the space associated with a slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(KA2PA(slab->start));
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_cache, slab);

//  atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/**
 * Return an object to its slab and call the destructor
 *
 * @param slab Slab of the object, if known to the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

//  ASSERT(slab->cache == cache);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

    *(void**)obj = slab->nextavail;
    slab->nextavail = obj;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == cache->objects) {
        /* Free the associated memory */
        list_remove(&slab->link);
//      spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
//  spinlock_unlock(&cache->slablock);
    return freed;
}



/** Return an object to the cache, using the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    eflags_t efl;

    efl = safe_cli();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
//      || magazine_obj_put(cache, obj)) {

    slab_obj_destroy(cache, obj, slab);

//  }
    safe_sti(efl);

    /* NOTE: allocated_objs is decremented here, but the matching
       increment in slab_alloc() is currently commented out */
    atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to the cache */
void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

static slab_t *slab_create(void)
{
    slab_t *slab;
    void *obj;
    u32_t p;

    DBG("%s\n", __FUNCTION__);

//  spinlock_lock(&cache->slablock);

    if (list_empty(&slab_cache->partial_slabs)) {
//      spinlock_unlock(&cache->slablock);
//      slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)PA2KA(alloc_page());
        if (!data) {
            return NULL;
        }

        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache->objects;
        slab->nextavail = (void*)data;
        slab->cache = slab_cache;

        for (i = 0, p = (u32_t)slab->start; i < slab_cache->objects; i++)
        {
            *(addr_t *)p = p + slab_cache->size;
            p = p + slab_cache->size;
        }

        atomic_inc(&slab_cache->allocated_slabs);
//      spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache->full_slabs);
    else
        list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_t*)obj;
}
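
/*
 * slab_create() above and slab_cache_alloc() below are bootstrap paths:
 * they carve slab_t / slab_cache_t objects out of a single page and keep
 * that page's own slab_t header at its end, mirroring the
 * SLAB_CACHE_SLINSIDE layout.  For slab_create() the object count comes
 * from slab_cache->objects, which is computed from the full page size
 * (the cache is created without SLAB_CACHE_SLINSIDE in slab_cache_init()),
 * so the last object slot appears to overlap the in-page header; this is
 * left as-is here and only noted as an observation.
 */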

static slab_cache_t *slab_cache_alloc(void)
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    DBG("%s\n", __FUNCTION__);

    if (list_empty(&slab_cache_cache.partial_slabs))
    {
//      spinlock_unlock(&cache->slablock);
//      slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)(PA2KA(alloc_page()));
        if (!data) {
            return NULL;
        }

        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void*)data;
        slab->cache = &slab_cache_cache;

        for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
        {
            *p = (u32_t)p + slab_cache_cache.size;
            p = (u32_t*)((u32_t)p + slab_cache_cache.size);
        }

        atomic_inc(&slab_cache_cache.allocated_slabs);
//      spinlock_lock(&cache->slablock);
    }
    else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_cache_t*)obj;
}

void slab_cache_init(void)
{
    DBG("%s\n", __FUNCTION__);

    _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                       sizeof(void *), NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    /* Initialize the external slab cache */
    slab_cache = slab_cache_create(sizeof(slab_t),
                                   0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
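
/*
 * Usage sketch (not part of the original file): how a kernel component
 * might use this allocator after slab_cache_init() has run.  The struct
 * name and fields below are hypothetical and only illustrate the
 * create / alloc / free pattern; error handling is minimal.
 */
#if 0
typedef struct {
    u32_t  id;
    void  *payload;
} example_obj_t;

static void example_slab_usage(void)
{
    slab_cache_t  *cache;
    example_obj_t *obj;

    /* One cache per object type; flags as used elsewhere in this file */
    cache = slab_cache_create(sizeof(example_obj_t), sizeof(void *),
                              NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    if (!cache)
        return;

    obj = (example_obj_t*)slab_alloc(cache, 0);
    if (obj) {
        obj->id = 1;
        slab_free(cache, obj);     /* return the object to its cache */
    }
}
#endif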