
#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>


extern zone_t z_core;


static LIST_INITIALIZE(slab_cache_list);

static slab_cache_t *slab_cache;

static slab_cache_t slab_cache_cache;

static slab_t *slab_create(void);

static slab_cache_t *slab_cache_alloc(void);

/**
 * Allocate frames for slab space and initialize the slab
 *
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    data = (void*)PA2KA(frame_alloc(1 << cache->order));
    if (!data) {
        return NULL;
    }

    slab = slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Fill in slab structures */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Build the in-place free list: each free object stores the
       address of the next free object in its first word */
    for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
    {
        *(addr_t *)p = p + cache->size;
        p = p + cache->size;
    }

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * @return Object address or NULL
 */
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        slab = slab_space_alloc(cache, flags);
        if (!slab)
        {
            spinlock_unlock(&cache->slablock);
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }

    obj = slab->nextavail;
    slab->nextavail = *(void**)obj;
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
//      slab_obj_destroy(cache, obj, slab);
//      return NULL;
//  }
    return obj;
}


/** Map an object back to its slab structure */
static slab_t *obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}


/** Allocate a new object from the cache - if no flags are given,
    always returns memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
    eflags_t efl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    efl = safe_cli();

//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//      result = magazine_obj_get(cache);
//  }
//  if (!result)
    result = slab_obj_create(cache, flags);

    safe_sti(efl);

//  if (result)
//      atomic_inc(&cache->allocated_objs);

    return result;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into a slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static unsigned int badness(slab_cache_t *cache)
{
    unsigned int objects;
    unsigned int ssize;
    size_t val;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    val = ssize - objects * cache->size;
    return val;
}


/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
//  ipl_t ipl;

//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
//  cache->name = name;

//  if (align < sizeof(unative_t))
//      align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

    cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//      make_magcache(cache);

    /* Compute slab sizes, object counts in slabs, etc. */

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need 2^order >= pages */
    if (pages <= 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages - 1) + 1;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);

    /* Add cache to cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}

/** Create a slab cache */
slab_cache_t *slab_cache_create(
                                size_t size,
                                size_t align,
                                int (*constructor)(void *obj, int kmflag),
                                int (*destructor)(void *obj),
                                int flags)
{
    slab_cache_t *cache;

    DBG("%s\n", __FUNCTION__);

    cache = slab_cache_alloc();
    _slab_cache_create(cache, size, align, constructor, destructor, flags);
    return cache;
}

/**
 * Deallocate the space associated with a slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(KA2PA(slab->start));
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_cache, slab);

//  atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/**
 * Return an object to its slab and call the destructor
 *
 * @param slab The slab of the object, if known by the caller; otherwise NULL
 *
 * @return Number of freed frames
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

//  ASSERT(slab->cache == cache);

//  if (cache->destructor)
//      freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

    /* Put the object back on the slab's free list */
    *(void**)obj = slab->nextavail;
    slab->nextavail = obj;
    slab->available++;

    /* Move the slab to the correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
//      spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
//  spinlock_unlock(&cache->slablock);
    return freed;
}


/** Return an object to its cache; use the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    eflags_t efl;

    efl = safe_cli();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
//      || magazine_obj_put(cache, obj)) {

    slab_obj_destroy(cache, obj, slab);

//  }
    safe_sti(efl);
    atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to its cache */
void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

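/**
 * Allocate a slab_t control structure from the internal slab_cache.
 * If no partially used slab is available, a fresh page is allocated and
 * the slab_t descriptor is placed at the end of that page.
 */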
static slab_t *slab_create(void)
{
    slab_t *slab;
    void *obj;
    u32_t p;

    DBG("%s\n", __FUNCTION__);

//  spinlock_lock(&cache->slablock);

    if (list_empty(&slab_cache->partial_slabs)) {
//      spinlock_unlock(&cache->slablock);
//      slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)PA2KA(alloc_page());
        if (!data) {
            return NULL;
        }

        /* Place the slab descriptor at the end of the page */
        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache->objects;
        slab->nextavail = (void*)data;
        slab->cache = slab_cache;

        /* Build the in-place free list */
        for (i = 0, p = (u32_t)slab->start; i < slab_cache->objects; i++)
        {
            *(addr_t *)p = p + slab_cache->size;
            p = p + slab_cache->size;
        }

        atomic_inc(&slab_cache->allocated_slabs);
//      spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }

    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache->full_slabs);
    else
        list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_t*)obj;
}

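/**
 * Allocate a slab_cache_t descriptor from the static slab_cache_cache.
 * Mirrors slab_create(): if no partially used slab is available, a fresh
 * page is allocated and its slab_t descriptor is kept at the end of the page.
 */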
static slab_cache_t *slab_cache_alloc(void)
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    DBG("%s\n", __FUNCTION__);

    if (list_empty(&slab_cache_cache.partial_slabs)) {
//      spinlock_unlock(&cache->slablock);
//      slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)PA2KA(alloc_page());
        if (!data) {
            return NULL;
        }

        /* Place the slab descriptor at the end of the page */
        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void*)data;
        slab->cache = &slab_cache_cache;

        /* Build the in-place free list */
        for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
        {
            *p = (u32_t)p + slab_cache_cache.size;
            p = (u32_t*)((u32_t)p + slab_cache_cache.size);
        }

        atomic_inc(&slab_cache_cache.allocated_slabs);
//      spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }

    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_cache_t*)obj;
}

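/** Bootstrap the slab allocator: set up the cache of caches and the cache
    that provides external slab_t descriptors. */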
void slab_cache_init(void)
{
    DBG("%s\n", __FUNCTION__);

    _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                       sizeof(void *), NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    /* Initialize external slab cache */
    slab_cache = slab_cache_create(sizeof(slab_t),
                                   0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
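
/*
 * Usage sketch: how a client might create and use a cache through the API
 * above. The my_object_t type and the alignment/flag values passed here are
 * hypothetical, chosen only for illustration.
 *
 *   typedef struct { u32_t id; void *payload; } my_object_t;
 *
 *   slab_cache_t *my_cache = slab_cache_create(sizeof(my_object_t),
 *                                              sizeof(void *), NULL, NULL, 0);
 *
 *   my_object_t *obj = (my_object_t*)slab_alloc(my_cache, 0);
 *   ...
 *   slab_free(my_cache, obj);
 */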