Subversion Repositories Kolibri OS

Rev 886
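
/*
 * Kernel slab allocator: caches of fixed-size objects carved out of page
 * frames obtained from the core allocator.  Each cache keeps lists of full
 * and partially used slabs; the free objects inside a slab are linked into
 * a simple free list threaded through the objects themselves.
 */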

#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>

extern zone_t z_core;

static LIST_INITIALIZE(slab_cache_list);

static slab_cache_t *slab_cache;

static slab_cache_t slab_cache_cache;

static slab_t *slab_create(void);

static slab_cache_t *slab_cache_alloc(void);

void slab_free(slab_cache_t *cache, void *obj);

/**
 * Allocate frames for slab space and initialize the new slab
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
  void *data;
  slab_t *slab;
  unsigned int i;
  u32_t p;

  data = (void*)PA2KA(core_alloc(cache->order));
  if (!data) {
    return NULL;
  }
  slab = (slab_t*)slab_create();
  if (!slab) {
    core_free(KA2PA(data));
    return NULL;
  }

  /* Fill in slab structures */
  for (i = 0; i < ((u32_t) 1 << cache->order); i++)
    frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

  slab->start = data;
  slab->available = cache->objects;
  slab->nextavail = (void*)data;
  slab->cache = cache;

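  /* Thread a free list through the slab: each free object begins with the
   * address of the next free object. */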
  for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
  {
    *(addr_t *)p = p+cache->size;
    p = p+cache->size;
  }
  atomic_inc(&cache->allocated_slabs);
  return slab;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
  slab_t *slab;
  void *obj;

  spinlock_lock(&cache->slablock);

  if (list_empty(&cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   so recursion is at most one level deep
     */
    slab = slab_space_alloc(cache, flags);
    if (!slab)
    {
      spinlock_unlock(&cache->slablock);
      return NULL;
    }
  } else {
    slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }

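  /* Pop the first free object off the slab's free list */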
  obj = slab->nextavail;
  slab->nextavail = *(void**)obj;
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &cache->full_slabs);
  else
    list_prepend(&slab->link, &cache->partial_slabs);

  spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
//    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
  return obj;
}


/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
  return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}


/** Allocate a new object from the cache; if no flags are given, always
    returns memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
  eflags_t efl;
  void *result = NULL;

  /* Disable interrupts to avoid deadlocks with interrupt handlers */
  efl = safe_cli();

//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//    result = magazine_obj_get(cache);
//  }
//  if (!result)
    result = slab_obj_create(cache, flags);

  safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

  return result;
}



/**************************************/
/* Slab cache functions */

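/* With SLAB_CACHE_SLINSIDE the slab_t control structure is kept inside the
 * slab's own pages and reduces the usable space; without it the slab_t is
 * allocated from the separate slab_cache and released in slab_space_free(). */
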
/** Return the number of objects that fit in a slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
  else
    return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
  unsigned int objects;
  unsigned int ssize;
  size_t val;

  objects = comp_objects(cache);
  ssize = PAGE_SIZE << cache->order;
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    ssize -= sizeof(slab_t);
  val = ssize - objects * cache->size;
  return val;
}


/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
  int pages;
//  ipl_t ipl;

//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

  cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
  cache->flags = flags;

  list_initialize(&cache->full_slabs);
  list_initialize(&cache->partial_slabs);
  list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

  /* Compute slab sizes, object counts in slabs, etc. */

  /* Minimum slab order */
  pages = SIZE2FRAMES(cache->size);
  /* We need 2^order >= pages */
  if (pages == 1)
    cache->order = 0;
  else
    cache->order = fnzb(pages-1)+1;

  while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
    cache->order += 1;
  }
  cache->objects = comp_objects(cache);

  /* Add cache to cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

  list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}

/** Create a slab cache */
slab_cache_t * slab_cache_create(
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
  slab_cache_t *cache;

  cache = (slab_cache_t*)slab_cache_alloc();
  if (!cache)
    return NULL;
  _slab_cache_create(cache, size, align, constructor, destructor, flags);
  return cache;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
  frame_free(KA2PA(slab->start));
  if (!(cache->flags & SLAB_CACHE_SLINSIDE))
    slab_free(slab_cache, slab);

//  atomic_dec(&cache->allocated_slabs);

  return 1 << cache->order;
}

/**
 * Return object to slab and call a destructor
 *
 * @param slab The object's slab if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
  int freed = 0;

  if (!slab)
    slab = obj2slab(obj);

//  ASSERT(slab->cache == cache);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

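  /* Push the object back onto the slab's free list */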
  *(void**)obj = slab->nextavail;
  slab->nextavail = obj;
  slab->available++;

  /* Move it to correct list */
  if (slab->available == cache->objects) {
    /* Free associated memory */
    list_remove(&slab->link);
//    spinlock_unlock(&cache->slablock);

    return freed + slab_space_free(cache, slab);

  } else if (slab->available == 1) {
    /* It was in full, move to partial */
    list_remove(&slab->link);
    list_prepend(&slab->link, &cache->partial_slabs);
  }
//  spinlock_unlock(&cache->slablock);
  return freed;
}


/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
//  ipl_t ipl;

//  ipl = interrupts_disable();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
//      || magazine_obj_put(cache, obj)) {

    slab_obj_destroy(cache, obj, slab);

//  }
//  interrupts_restore(ipl);
//  atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
  _slab_free(cache, obj, NULL);
}

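/** Allocate a slab_t control structure from the dedicated slab_cache,
 *  growing that cache by one page if it has no partial slabs.
 */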
static slab_t *slab_create(void)
{
  slab_t *slab;
  void *obj;
  u32_t p;

//  spinlock_lock(&cache->slablock);

  if (list_empty(&slab_cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   so recursion is at most one level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->start = data;
    slab->available = slab_cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = slab_cache;

    for (i = 0, p = (u32_t)slab->start; i < slab_cache->objects; i++)
    {
      *(addr_t *)p = p+slab_cache->size;
      p = p+slab_cache->size;
    }

    atomic_inc(&slab_cache->allocated_slabs);
//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache->full_slabs);
  else
    list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_t*)obj;
}

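/** Allocate a slab_cache_t descriptor from the statically allocated
 *  slab_cache_cache, growing it by one page if it has no partial slabs.
 */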
static slab_cache_t * slab_cache_alloc(void)
{
  slab_t *slab;
  void *obj;
  u32_t *p;

  if (list_empty(&slab_cache_cache.partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   so recursion is at most one level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->start = data;
    slab->available = slab_cache_cache.objects;
    slab->nextavail = (void*)data;
    slab->cache = &slab_cache_cache;

    for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
    {
      *p = (u32_t)p+slab_cache_cache.size;
      p = (u32_t*)((u32_t)p+slab_cache_cache.size);
    }

    atomic_inc(&slab_cache_cache.allocated_slabs);
//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache_cache.full_slabs);
  else
    list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_cache_t*)obj;
}

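/** Set up the bootstrap caches: the cache of cache descriptors
 *  (slab_cache_cache) and the cache of external slab_t control
 *  structures (slab_cache).
 */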
void slab_cache_init(void)
{
  _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                     sizeof(void *), NULL, NULL,
                     SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

  /* Initialize external slab cache */
  slab_cache = slab_cache_create(sizeof(slab_t),
                                 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
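
/* Illustrative usage sketch (not part of the original file): how a kernel
 * subsystem might create a cache and allocate/free objects with the
 * slab_cache_create/slab_alloc/slab_free API defined above.  The type
 * example_obj_t and the functions below are hypothetical, and the block
 * is compiled out.
 */
#if 0
typedef struct {
  link_t link;
  u32_t  id;
} example_obj_t;

static slab_cache_t *example_cache;

static void example_init(void)
{
  /* One cache of fixed-size example_obj_t objects, no constructor/destructor */
  example_cache = slab_cache_create(sizeof(example_obj_t),
                                    sizeof(void *), NULL, NULL, 0);
}

static example_obj_t *example_new(void)
{
  return (example_obj_t*)slab_alloc(example_cache, 0);
}

static void example_delete(example_obj_t *obj)
{
  slab_free(example_cache, obj);
}
#endif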