Subversion Repositories Kolibri OS

Rev 859

#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>

extern zone_t z_core;

static LIST_INITIALIZE(slab_cache_list);

/* Cache that external slab_t descriptors are allocated from */
static slab_cache_t *slab_cache;

/* Statically allocated cache of slab_cache_t descriptors */
static slab_cache_t slab_cache_cache;

static slab_t * slab_create(void);

static slab_cache_t * slab_cache_alloc(void);

/**
 * Allocate frames for slab space and initialize the slab
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
  void *data;
  slab_t *slab;
  unsigned int i;
  u32_t p;

  data = (void*)PA2KA(core_alloc(cache->order));
  if (!data) {
    return NULL;
  }
  slab = (slab_t*)slab_create();
  if (!slab) {
    core_free(KA2PA(data));
    return NULL;
  }

  /* Fill in slab structures: point every frame back at its slab_t */
  for (i = 0; i < ((u32_t) 1 << cache->order); i++)
    frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

  slab->start = data;
  slab->available = cache->objects;
  slab->nextavail = (void*)data;
  slab->cache = cache;

  /* Build the in-slab free list: the first word of each free object
   * holds the address of the next free object. */
  for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
  {
    *(addr_t *)p = p + cache->size;
    p = p + cache->size;
  }
  atomic_inc(&cache->allocated_slabs);
  return slab;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
  slab_t *slab;
  void *obj;

  spinlock_lock(&cache->slablock);

  if (list_empty(&cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
    slab = slab_space_alloc(cache, flags);
    if (!slab)
    {
      spinlock_unlock(&cache->slablock);
      return NULL;
    }
  } else {
    slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }

  /* Pop the first object off the slab's free list */
  obj = slab->nextavail;
  slab->nextavail = *(void**)obj;
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &cache->full_slabs);
  else
    list_prepend(&slab->link, &cache->partial_slabs);

  spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
  return obj;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
  return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}

/** Allocate a new object from the cache; if no flags are given,
    this always returns memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
  eflags_t efl;
  void *result = NULL;

  /* Disable interrupts to avoid deadlocks with interrupt handlers */
  efl = safe_cli();

 // if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
 //   result = magazine_obj_get(cache);
 // }
//  if (!result)
    result = slab_obj_create(cache, flags);

  safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

  return result;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit in a slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
  else
    return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
  unsigned int objects;
  unsigned int ssize;
  size_t val;

  objects = comp_objects(cache);
  ssize = PAGE_SIZE << cache->order;
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    ssize -= sizeof(slab_t);
  val = ssize - objects * cache->size;
  return val;
}
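
/*
 * Worked example (illustrative figures only, not taken from this file):
 * assuming PAGE_SIZE == 4096 and sizeof(slab_t) == 32, a cache of
 * 96-byte objects with SLAB_CACHE_SLINSIDE and order 0 would give
 *   comp_objects() = (4096 - 32) / 96      = 42 objects
 *   badness()      = (4096 - 32) - 42 * 96 = 32 bytes wasted
 */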


/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
  int pages;
 // ipl_t ipl;

//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
//  cache->name = name;

//if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

  cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
  cache->flags = flags;

  list_initialize(&cache->full_slabs);
  list_initialize(&cache->partial_slabs);
  list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

  /* Compute slab size, object count per slab etc. */

  /* Minimum slab order: we need 2^order >= pages */
  pages = SIZE2FRAMES(cache->size);
  if (pages == 1)
    cache->order = 0;
  else
    cache->order = fnzb(pages-1) + 1;

  /* Grow the slab until the wasted space is acceptable */
  while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
    cache->order += 1;
  }
  cache->objects = comp_objects(cache);

  /* Add cache to cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

  list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
  slab_cache_t *cache;

  cache = (slab_cache_t*)slab_cache_alloc();
  _slab_cache_create(cache, size, align, constructor, destructor, flags);
  return cache;
}
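
/*
 * Usage sketch (illustrative only, not part of the original module):
 * a subsystem typically creates one cache per fixed-size object type
 * and serves all allocations of that type from it.  The names
 * my_object_t, my_cache and my_subsystem_init are hypothetical, and
 * the example is kept out of the build with #if 0.  Constructor and
 * destructor support is commented out above, so NULL is passed.
 */
#if 0
typedef struct
{
  u32_t  id;
  void  *payload;
} my_object_t;

static slab_cache_t *my_cache;

void my_subsystem_init(void)
{
  my_cache = slab_cache_create(sizeof(my_object_t), sizeof(void*),
                               NULL, NULL, 0);
}
#endif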

/**
 * Deallocate space associated with a slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
  frame_free(KA2PA(slab->start));
  /* External slab_t descriptors are returned to their own cache */
  if (! (cache->flags & SLAB_CACHE_SLINSIDE))
    slab_free(slab_cache, slab);

//  atomic_dec(&cache->allocated_slabs);

  return 1 << cache->order;
}

/**
 * Return an object to its slab and call the destructor
 *
 * @param slab The object's slab if the caller already knows it, otherwise NULL
 *
 * @return Number of freed frames
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
  int freed = 0;

  if (!slab)
    slab = obj2slab(obj);

//  ASSERT(slab->cache == cache);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

  /* Push the object back onto the slab's free list */
  *(void**)obj = slab->nextavail;
  slab->nextavail = obj;
  slab->available++;

  /* Move it to correct list */
  if (slab->available == cache->objects) {
    /* Free associated memory */
    list_remove(&slab->link);
//    spinlock_unlock(&cache->slablock);

    return freed + slab_space_free(cache, slab);

  } else if (slab->available == 1) {
    /* It was in full, move to partial */
    list_remove(&slab->link);
    list_prepend(&slab->link, &cache->partial_slabs);
  }
//  spinlock_unlock(&cache->slablock);
  return freed;
}


/** Return an object to the cache; use the slab if it is known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
  eflags_t efl;

  efl = safe_cli();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
//      || magazine_obj_put(cache, obj)) {

    slab_obj_destroy(cache, obj, slab);

//  }
  safe_sti(efl);
  atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
  _slab_free(cache, obj, NULL);
}
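
/*
 * Allocation sketch (illustrative only, not part of the original module):
 * slab_alloc() and slab_free() bracket the lifetime of an object and must
 * use the same cache.  my_cache and my_object_t are the hypothetical names
 * from the sketch above; the example is kept out of the build with #if 0.
 */
#if 0
void my_subsystem_work(void)
{
  my_object_t *obj;

  obj = (my_object_t*)slab_alloc(my_cache, 0);
  if (!obj)
    return;                      /* allocation failed */

  obj->id = 1;                   /* ... use the object ... */

  slab_free(my_cache, obj);      /* return it to the same cache */
}
#endif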

/** Allocate a slab_t descriptor from the dedicated slab_cache */
static slab_t *slab_create(void)
{
  slab_t *slab;
  void *obj;
  u32_t p;

//  spinlock_lock(&cache->slablock);

  if (list_empty(&slab_cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    /* The slab_t for this slab lives at the end of its own page */
    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->start = data;
    slab->available = slab_cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = slab_cache;

    /* Build the in-slab free list */
    for (i = 0, p = (u32_t)slab->start; i < slab_cache->objects; i++)
    {
      *(addr_t *)p = p + slab_cache->size;
      p = p + slab_cache->size;
    }

    atomic_inc(&slab_cache->allocated_slabs);
//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache->full_slabs);
  else
    list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_t*)obj;
}

/** Allocate a slab_cache_t descriptor from slab_cache_cache */
static slab_cache_t * slab_cache_alloc(void)
{
  slab_t *slab;
  void *obj;
  u32_t *p;

  if (list_empty(&slab_cache_cache.partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)(PA2KA(core_alloc(0)));
    if (!data) {
      return NULL;
    }

    /* The slab_t for this slab lives at the end of its own page */
    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->start = data;
    slab->available = slab_cache_cache.objects;
    slab->nextavail = (void*)data;
    slab->cache = &slab_cache_cache;

    /* Build the in-slab free list */
    for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
    {
      *p = (u32_t)p + slab_cache_cache.size;
      p = (u32_t*)((u32_t)p + slab_cache_cache.size);
    }

    atomic_inc(&slab_cache_cache.allocated_slabs);
//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache_cache.full_slabs);
  else
    list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_cache_t*)obj;
}

void slab_cache_init(void)
{
  /* Bootstrap: the cache of slab_cache_t descriptors is static and
   * keeps its slab_t inside the slab itself */
  _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                     sizeof(void *), NULL, NULL,
                     SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

  /* Initialize external slab cache */
  slab_cache = slab_cache_create(sizeof(slab_t), 0, NULL, NULL,
                                 SLAB_CACHE_MAGDEFERRED);
}
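
/*
 * Boot-order sketch (illustrative only, not part of the original module):
 * slab_cache_init() has to run after the core frame allocator is available
 * and before any caller of slab_cache_create()/slab_alloc(), since both the
 * static slab_cache_cache and the external slab_cache are set up here.
 * init_mm is a hypothetical caller name; the example is kept out of the
 * build with #if 0.
 */
#if 0
void init_mm(void)
{
  /* ... frame/zone initialization ... */
  slab_cache_init();
  /* ... subsystems may now create their own caches ... */
}
#endif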