A Deep Dive into the Slab Mechanism (3): The Slab Allocation Mechanism


2.3 The slab allocation mechanism:

Whether through kmalloc or kmem_cache_alloc, everything ends up calling __cache_alloc, the general entry point that hands slab objects to callers:

static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
    unsigned long save_flags;
    void *objp;

    flags &= gfp_allowed_mask;

    lockdep_trace_alloc(flags);

    if (slab_should_failslab(cachep, flags))
        return NULL;

    /* debug checks before the allocation */
    cache_alloc_debugcheck_before(cachep, flags);
    local_irq_save(save_flags);
    /* the actual allocation */
    objp = __do_cache_alloc(cachep, flags);
    local_irq_restore(save_flags);
    /* debug checks after the allocation */
    objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
    /* empty function in the usual configuration */
    kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
                             flags);
    prefetchw(objp);

    if (likely(objp))
        kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));

    if (unlikely((flags & __GFP_ZERO) && objp))
        memset(objp, 0, obj_size(cachep));

    return objp;
}
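For context, here is a minimal sketch of how a caller reaches this path, using the standard kernel API of this era (struct foo and foo_cache are made-up names; module boilerplate and error paths are trimmed):

#include <linux/slab.h>

struct foo {
    int a;
    char buf[32];
};

static struct kmem_cache *foo_cache;

static int demo(void)
{
    struct foo *f;
    void *p;

    /* dedicated cache: kmem_cache_alloc() funnels into __cache_alloc() */
    foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                  0, 0, NULL);
    if (!foo_cache)
        return -ENOMEM;

    f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

    /* generic allocation: kmalloc() picks the matching size-class cache
     * and likewise funnels into __cache_alloc() */
    p = kmalloc(64, GFP_KERNEL);

    kfree(p);
    kmem_cache_free(foo_cache, f);
    kmem_cache_destroy(foo_cache);
    return 0;
}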

We only need to look at __do_cache_alloc, which in turn calls ____cache_alloc — this is the key function:

static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    void *objp;
    struct array_cache *ac;

    check_irq_off();
    /* get this cache's local-cache descriptor (struct array_cache) */
    ac = cpu_cache_get(cachep);
    /* if the local cache holds free objects, take one directly;
     * otherwise refill it via cache_alloc_refill() */
    if (likely(ac->avail)) {
        /* empty function */
        STATS_INC_ALLOCHIT(cachep);
        ac->touched = 1;
        /* entry[] immediately follows struct array_cache in memory and
         * holds pointers to free objects; --avail pops the last entry,
         * i.e. the hottest (most recently freed) object, which is most
         * likely still resident in the CPU hardware cache */
        objp = ac->entry[--ac->avail];
    } else {
        /* empty function */
        STATS_INC_ALLOCMISS(cachep);
        /* pull new objects into the local cache */
        objp = cache_alloc_refill(cachep, flags);
    }
    /*
     * To avoid a false negative, if an object that is in one of the
     * per-CPU caches is leaked, we need to make sure kmemleak doesn't
     * treat the array pointers as a reference to the object.
     */
    /* empty function */
    kmemleak_erase(&ac->entry[ac->avail]);
    return objp;
}

It first fetches the cache's array member for the current CPU — the so-called local cache. Objects are handed out from this array; if the array holds no free objects, objects are transferred into it from the cache's three slab lists; and if the three slab lists have no free slab either, the lists first obtain physical memory from the buddy system and then pass objects on to the array. That is the whole principle of slab allocation:

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
    return cachep->array[smp_processor_id()];
}

The if statement that follows tests the array's avail member, i.e. whether the array holds any free objects. Recall cache creation from the previous section: in setup_cpu_cache, both before and after slab bootstrap has finished, avail is initialized to 0, meaning there are currently no free objects. Together with the code here, this confirms that the first kmalloc/kmem_cache_alloc on a cache triggers an actual allocation of physical memory, i.e. the else branch of ____cache_alloc;

If avail is non-zero, the array currently holds free objects, so the last one (entry[--ac->avail], the most recently freed and thus hottest) is returned directly to the kmalloc/kmem_cache_alloc caller, avail is decremented (the -- operator), and the array's touched flag is set;
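To see why entry[--avail] hands out the "hottest" object, here is a user-space toy model of the array_cache LIFO discipline (all names and the LIMIT value are made up for illustration):

#include <stdio.h>

#define LIMIT 8

/* Toy model of struct array_cache: entry[] is used as a LIFO stack. */
struct ac_model {
    unsigned int avail;          /* number of free object pointers */
    void *entry[LIMIT];
};

/* Free: push on top -- this object becomes the hottest. */
static void ac_free(struct ac_model *ac, void *obj)
{
    ac->entry[ac->avail++] = obj;
}

/* Allocate: pop from top -- hand out the most recently freed object,
 * which is most likely still in the CPU hardware cache. */
static void *ac_alloc(struct ac_model *ac)
{
    return ac->avail ? ac->entry[--ac->avail] : NULL;
}

int main(void)
{
    struct ac_model ac = { 0 };
    int a, b;

    ac_free(&ac, &a);
    ac_free(&ac, &b);
    printf("%s\n", ac_alloc(&ac) == &b ? "b comes out first (LIFO)" : "?");
    return 0;
}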

If avail is 0, the array has no free objects, and objects must be produced along the lines just described, by calling cache_alloc_refill:

static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
    int batchcount;
    struct kmem_list3 *l3;
    struct array_cache *ac;
    int node;

retry:
    check_irq_off();
    node = numa_node_id();
    /* the local cache */
    ac = cpu_cache_get(cachep);
    /* prepare to refill the local cache; batchcount is the number of
     * objects transferred in or out at a time */
    batchcount = ac->batchcount;
    if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
        /*
         * If there was little recent activity on this cache, then
         * perform only a partial refill.  Otherwise we could generate
         * refill bouncing.
         */
        batchcount = BATCHREFILL_LIMIT;
    }
    /* this cache's three slab lists on the local memory node */
    l3 = cachep->nodelists[node];

    BUG_ON(ac->avail > 0 || !l3);
    spin_lock(&l3->list_lock);

    /* See if we can refill from the shared array */
    /* if a shared local cache exists, refill from it first; this is
     * multi-core only: an array_cache shared by the CPUs of the node */
    if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
        goto alloc_done;

    /* otherwise allocate from the three slab lists in this node's kmem_list3 */
    while (batchcount > 0) {
        struct list_head *entry;
        struct slab *slabp;
        /* Get slab alloc is to come from. */
        /* try the partially-free slab list first */
        entry = l3->slabs_partial.next;
        /* no partially-free slabs left: fall back to the fully-free list */
        if (entry == &l3->slabs_partial) {
            /* mark the fully-free list as used before touching it */
            l3->free_touched = 1;
            /* point entry at its first node */
            entry = l3->slabs_free.next;
            /* no fully-free slabs either: the cache must grow */
            if (entry == &l3->slabs_free)
                goto must_grow;
        }
        /* at least one of the two lists was non-empty: get its slab descriptor */
        slabp = list_entry(entry, struct slab, list);
        /* the next two are empty functions */
        check_slabp(cachep, slabp);
        check_spinlock_acquired(cachep);

        /*
         * The slab was either on partial or free list so
         * there must be at least one object available for
         * allocation.
         */
        BUG_ON(slabp->inuse >= cachep->num);

        /* while this slab still has free objects, move up to batchcount
         * of them into the local cache */
        while (slabp->inuse < cachep->num && batchcount--) {
            /* normally the next three are empty functions */
            STATS_INC_ALLOCED(cachep);
            STATS_INC_ACTIVE(cachep);
            STATS_SET_HIGH(cachep);
            /* take one free object out of the slab and store its virtual
             * address in the local cache; each call to slab_get_obj()
             * increments slabp->inuse and advances the index of the
             * first free object */
            ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
                                                  node);
        }
        /* normally an empty function */
        check_slabp(cachep, slabp);

        /* move slabp to correct slabp list: */
        /* unlink the slab from its current list (partial or free) */
        list_del(&slabp->list);
        /* relink it according to how full it now is:
         * no free objects left -> the full list;
         * free objects remain  -> the partial list */
        if (slabp->free == BUFCTL_END)
            list_add(&slabp->list, &l3->slabs_full);
        else
            list_add(&slabp->list, &l3->slabs_partial);
    }

must_grow:
    /* ac->avail objects were just moved from the three slab lists into
     * the local cache; update the lists' free-object count */
    l3->free_objects -= ac->avail;
alloc_done:
    spin_unlock(&l3->list_lock);

    if (unlikely(!ac->avail)) {
        int x;
        /* grow the cache by one new slab via cache_grow()
         * arguments: cache pointer, flags, memory node, page virtual
         * address (NULL: no pages allocated yet; non-NULL: pages already
         * available to build the slab on)
         * return value: 1 on success, 0 on failure */
        x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

        /* cache_grow can reenable interrupts, then ac could change. */
        /* interrupts may have been enabled above, and the local-cache
         * pointer may have changed meanwhile; fetch it again */
        ac = cpu_cache_get(cachep);

        /* no new slab could be added and the local cache is still empty:
         * the system cannot provide any free object */
        if (!x && ac->avail == 0)    /* no objects in sight? abort */
            return NULL;

        /* two possibilities remain:
         * 1) avail != 0: whether or not growing succeeded, some other
         *    context refilled the local cache, so no retry is needed;
         * 2) avail == 0 and growing succeeded: jump back to retry and
         *    fill the local cache from the freshly added empty slab */
        if (!ac->avail)              /* objects refilled by interrupt? */
            goto retry;
    }
    /* the local cache has been refilled; set the recently-used flag */
    ac->touched = 1;
    /* return the virtual address of the last free object in the local cache */
    return ac->entry[--ac->avail];
}

First note "batchcount = ac->batchcount;" — as mentioned earlier, this is the unit in which objects are moved in and out in bulk.

Then "l3 = cachep->nodelists[node];" fetches the cache's three slab lists.

The shared-array handling that follows can, in my view, be set aside for now; consider the single-CPU case first (in fact, once the slab principle is understood, you will find the shared mechanism quite efficient on multi-CPU systems).

Next comes the while loop, which checks the partial list and then the fully-free list of the three slab lists for a slab with free objects; if one exists, objects can be taken from it directly and handed to the array. Consider first the case where no free slab exists, which jumps to the label must_grow:

l3->free_objects -= ac->avail;

Here avail is 0, so the subtraction effectively changes nothing; the key part is alloc_done that follows.

In the normal case, since avail is 0, the if (unlikely(!ac->avail)) branch is entered. At this point the three slab lists have no free slab either, so physical memory must be obtained from the buddy system by calling cache_grow. These are exactly the two conditions under which, as many texts including ULK describe, a new slab is created: 1) the local array has no free objects, and 2) the three slab lists have no free slab either:

static int cache_grow(struct kmem_cache *cachep,
            gfp_t flags, int nodeid, void *objp)
{
    struct slab *slabp;
    size_t offset;
    gfp_t local_flags;
    struct kmem_list3 *l3;

    /*
     * Be lazy and only check for valid flags here,  keeping it out of the
     * critical path in kmem_cache_alloc().
     */
    BUG_ON(flags & GFP_SLAB_BUG_MASK);
    local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

    /* Take the l3 list lock to change the colour_next on this node */
    check_irq_off();
    /* the cache's three slab lists on this memory node */
    l3 = cachep->nodelists[nodeid];
    spin_lock(&l3->list_lock);

    /* Get colour for the slab, and cal the next value. */
    /* colour offset for the slab about to be created */
    offset = l3->colour_next;
    /* advance the colour offset for the next slab to be created */
    l3->colour_next++;
    /* the colour number must stay below the number of colours, i.e. it
     * must not exceed the colouring area; once it does, wrap back to 0.
     * This is the colour cycling analysed earlier: if a slab wastes
     * little space, the cycle is short */
    if (l3->colour_next >= cachep->colour)
        l3->colour_next = 0;
    spin_unlock(&l3->list_lock);
    /* turn the colour number into the cache's colour byte offset */
    offset *= cachep->colour_off;

    if (local_flags & __GFP_WAIT)
        local_irq_enable();

    /*
     * The test for missing atomic flag is performed here, rather than
     * the more obvious place, simply to reduce the critical path length
     * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
     * will eventually be caught here (where it matters).
     */
    kmem_flagcheck(cachep, flags);

    /*
     * Get mem for the objs.  Attempt to allocate a physical page from
     * 'nodeid'.
     */
    /* get physical pages from the buddy system; objp receives their
     * virtual address */
    if (!objp)
        objp = kmem_getpages(cachep, local_flags, nodeid);
    if (!objp)
        goto failed;

    /* Get slab management. */
    /* obtain a new slab descriptor */
    slabp = alloc_slabmgmt(cachep, objp, offset,
                local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
    if (!slabp)
        goto opps1;

    /* record the slab descriptor slabp and the cache descriptor cachep
     * in fields of the physical pages -- in essence a slab/cache-to-page
     * mapping, so the slab and cache descriptors can be located quickly
     * from a physical page */
    slab_map_pages(cachep, slabp, objp);

    /* initialize the slab's objects and their object descriptors */
    cache_init_objs(cachep, slabp);

    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    check_irq_off();
    spin_lock(&l3->list_lock);

    /* Make slab active. */
    /* tail-insert the slab descriptor into the fully-free slab list */
    list_add_tail(&slabp->list, &(l3->slabs_free));
    STATS_INC_GROWN(cachep);
    /* update the free-object counter of the three slab lists */
    l3->free_objects += cachep->num;
    spin_unlock(&l3->list_lock);
    return 1;
opps1:
    kmem_freepages(cachep, objp);
failed:
    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    return 0;
}

It first obtains the three-lists pointer l3 and deals with the colouring bookkeeping; colouring is covered separately later and is not needed to understand slab allocation, so it can be skipped for now;
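Still, the colour bookkeeping in the code above is small enough to illustrate with a user-space toy (the colour and colour_off values are made up):

#include <stdio.h>

int main(void)
{
    unsigned int colour = 4;        /* cachep->colour: number of colours (made up) */
    unsigned int colour_off = 64;   /* cachep->colour_off: e.g. one cache line (made up) */
    unsigned int colour_next = 0;   /* l3->colour_next */
    int i;

    /* each new slab gets the next colour; offsets cycle 0,64,128,192,0,... */
    for (i = 0; i < 6; i++) {
        unsigned int offset = colour_next++;
        if (colour_next >= colour)
            colour_next = 0;
        printf("slab %d: colour offset %u bytes\n", i, offset * colour_off);
    }
    return 0;
}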

Then physical pages are obtained from the buddy system by calling kmem_getpages:

static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
    struct page *page;
    int nr_pages;
    int i;

#ifndef CONFIG_MMU
    /*
     * Nommu uses slab's for process anonymous memory allocations, and thus
     * requires __GFP_COMP to properly refcount higher order allocations
     */
    flags |= __GFP_COMP;
#endif

    flags |= cachep->gfpflags;
    /* if SLAB_RECLAIM_ACCOUNT is set, also set __GFP_RECLAIMABLE: the
     * pages given to this slab will be accounted as reclaimable */
    if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
        flags |= __GFP_RECLAIMABLE;

    /* get physical pages from the buddy system; the size is determined
     * by cachep->gfporder (2^cachep->gfporder pages) */
    page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
    if (!page)
        return NULL;

    /* number of physical pages just obtained (2^cachep->gfporder) */
    nr_pages = (1 << cachep->gfporder);

    /* record the pages' state (reclaimable or not) in vmstat */
    if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
        add_zone_page_state(page_zone(page),
                NR_SLAB_RECLAIMABLE, nr_pages);
    else
        add_zone_page_state(page_zone(page),
                NR_SLAB_UNRECLAIMABLE, nr_pages);

    /* mark every one of these physical pages as a slab page */
    for (i = 0; i < nr_pages; i++)
        __SetPageSlab(page + i);

    /* kmemcheck_enabled is normally 0, so the following is skipped */
    if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
        kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

        if (cachep->ctor)
            kmemcheck_mark_uninitialized_pages(page, nr_pages);
        else
            kmemcheck_mark_unallocated_pages(page, nr_pages);
    }

    /* return the virtual address of the physical pages */
    return page_address(page);
}

For now just look at what this function mainly does: it calls alloc_pages_exact_node to request physical pages from the buddy system. Note that the page count is determined by the cache's gfporder member (2^gfporder pages), computed when the cache for this size class was created (normally at most 2 pages); it then sets some page state — note the reclaimable flag in particular, which matters again at free time — and finally returns the virtual address at which those pages are mapped in the kernel page tables. At this point the variable objp holds the virtual address of the physical pages just obtained from the buddy system;
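The order-to-size relationship is worth making concrete; a trivial stand-alone illustration (assuming 4 KiB pages):

#include <stdio.h>

int main(void)
{
    unsigned int gfporder;

    /* a cache's gfporder fixes the slab size at 2^gfporder pages */
    for (gfporder = 0; gfporder <= 3; gfporder++)
        printf("order %u -> %u pages (%lu bytes with 4 KiB pages)\n",
               gfporder, 1u << gfporder, (1ul << gfporder) * 4096);
    return 0;
}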

Next a slab descriptor is obtained via alloc_slabmgmt, another key function:

static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                    int colour_off, gfp_t local_flags,
                    int nodeid)
{
    struct slab *slabp;

    if (OFF_SLAB(cachep)) {
        /* Slab management obj is off-slab. */
        slabp = kmem_cache_alloc_node(cachep->slabp_cache,
                        local_flags, nodeid);
        /*
         * If the first object in the slab is leaked (it's allocated
         * but no one has a reference to it), we want to make sure
         * kmemleak does not treat the ->s_mem pointer as a reference
         * to the object. Otherwise we will not report the leak.
         */
        kmemleak_scan_area(slabp, offsetof(struct slab, list),
                    sizeof(struct list_head), local_flags);
        if (!slabp)
            return NULL;
    } else {
        /* for an on-slab management structure, the slab descriptor sits
         * at the start of the slab's own memory, i.e. at the slab's
         * virtual start address plus its colour offset; the colour
         * offset must then be advanced past the management structure */
        slabp = objp + colour_off;
        colour_off += cachep->slab_size;
    }
    slabp->inuse = 0;
    /* page offset of the first object: for an on-slab descriptor the
     * colouroff member covers both the colouring area and the space
     * taken by the management structure; for an off-slab descriptor it
     * covers only the colouring area */
    slabp->colouroff = colour_off;
    /* virtual address of the first object; in the on-slab case the
     * colour offset now includes the management structure */
    slabp->s_mem = objp + colour_off;
    slabp->nodeid = nodeid;
    /* the first free object has index 0, i.e. the first element of the
     * kmem_bufctl_t array */
    slabp->free = 0;
    return slabp;
}

This confirms what was described earlier. For an off-slab slab, the management structure — the slab descriptor plus the per-object descriptors (which are really just indices) — is allocated from the cache whose size class matches that structure's length; the object descriptors need no separate allocation. In the on-slab case, by contrast, the management structure lives inside the memory allocated for the slab itself — precisely within the slab's own memory, just past the colouring area — whereas the off-slab slabp points at a separately allocated location. This is why in the on-slab case the colour offset must additionally be advanced by cachep->slab_size (note that slab_size is the length of struct slab plus all the object descriptors), while in the off-slab case it need not.

This is easy to get muddled, so in more detail:

In the off-slab case, cachep->slabp_cache was initialized when this cache was created, i.e. in kmem_cache_create; its value is the general cache of the size class that slab_size falls into (in both cases slab_size is the length of struct slab plus all object descriptors). In the if branch of alloc_slabmgmt above, a piece of memory of that length is allocated from that cache to hold this cache's slab management structure. That is exactly what "off-slab" means: the management structure is allocated separately, as sketched below:

Slab management object (separate allocation from cachep->slabp_cache):
    [ struct slab | one descriptor per object ]

Slab memory (pages from the buddy system):
    [ colour offset | object | object | ... ]

In the on-slab case everything lives together, as sketched below:

Slab memory (pages from the buddy system):
    [ colour offset | struct slab | one descriptor per object | object | object | ... ]

Note that in both cases slabp points at the management structure; only its location differs. The slab descriptor's colouroff member is assigned colour_off, but the value differs between the two cases: on-slab it additionally includes the length of struct slab plus all the object descriptors. Consequently s_mem, the virtual address of the first object, also differs: objp points at the slab memory in both cases, but off-slab s_mem lies just the colour offset past objp, while on-slab it is offset by a further slab_size bytes.

Finally, the slab's inuse member counts the objects currently in use (initially 0), free holds the index of the first free object (initially 0), and nodeid records the memory node;
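A small user-space toy (with made-up sizes) that mirrors the address arithmetic above:

#include <stdio.h>

/* returns the offset of the first object within the slab memory */
static unsigned int first_obj_off(int on_slab, unsigned int colour_off,
                                  unsigned int slab_size)
{
    if (on_slab)
        colour_off += slab_size;  /* management structure precedes objects */
    return colour_off;
}

int main(void)
{
    unsigned int colour_off = 64;   /* colouring area (made up) */
    unsigned int slab_size = 100;   /* struct slab + descriptors (made up) */

    printf("off-slab: s_mem = objp + %u\n",
           first_obj_off(0, colour_off, slab_size));
    printf("on-slab:  s_mem = objp + %u\n",
           first_obj_off(1, colour_off, slab_size));
    return 0;
}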

Next comes the call to slab_map_pages, which stores the slab descriptor slabp in each page's lru.prev field and the cache descriptor cachep in its lru.next field. In essence it builds a mapping from physical page back to slab and cache, so that both descriptors can be located quickly given a physical page; it can be skimmed for now;
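In this kernel generation the mapping simply overloads the page's otherwise-unused lru list pointers; roughly, this is what the 2.6-era helpers page_set_cache and page_set_slab (called for every page of the slab) do:

/* paraphrase of the 2.6-era slab helpers, shown for illustration */
static inline void page_set_cache_sketch(struct page *page,
                                         struct kmem_cache *cache)
{
    page->lru.next = (struct list_head *)cache;   /* page -> cache */
}

static inline void page_set_slab_sketch(struct page *page,
                                        struct slab *slab)
{
    page->lru.prev = (struct list_head *)slab;    /* page -> slab */
}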

Then cache_init_objs is called to initialize the slab's objects and their object descriptors:

static void cache_init_objs(struct kmem_cache *cachep,
                struct slab *slabp)
{
    int i;

    for (i = 0; i < cachep->num; i++) {
        void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
        /* need to poison the objs? */
        if (cachep->flags & SLAB_POISON)
            poison_obj(cachep, objp, POISON_FREE);
        if (cachep->flags & SLAB_STORE_USER)
            *dbg_userword(cachep, objp) = NULL;

        if (cachep->flags & SLAB_RED_ZONE) {
            *dbg_redzone1(cachep, objp) = RED_INACTIVE;
            *dbg_redzone2(cachep, objp) = RED_INACTIVE;
        }
        /*
         * Constructors are not allowed to allocate memory from the same
         * cache which they are a constructor for.  Otherwise, deadlock.
         * They must also be threaded.
         */
        if (cachep->ctor && !(cachep->flags & SLAB_POISON))
            cachep->ctor(objp + obj_offset(cachep));

        if (cachep->flags & SLAB_RED_ZONE) {
            if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                slab_error(cachep, "constructor overwrote the"
                        " end of an object");
            if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                slab_error(cachep, "constructor overwrote the"
                        " start of an object");
        }
        if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
                OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
            kernel_map_pages(virt_to_page(objp),
                    cachep->buffer_size / PAGE_SIZE, 0);
#else
        if (cachep->ctor)
            cachep->ctor(objp);
#endif
        /* initially every object is free: simply chain them in array order */
        slab_bufctl(slabp)[i] = i + 1;
    }
    /* the last entry points to BUFCTL_END */
    slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}

Guided by the object count, the loop locates each object objp and calls the cache's constructor ctor on it; in practice most caches are created with ctor NULL, i.e. no per-object initialization is needed. The important part is "slab_bufctl(slabp)[i] = i + 1", which initializes the per-object descriptors to 1, 2, 3, ..., with the last one set to BUFCTL_END — this is the initialization of the object descriptors;
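The kmem_bufctl_t array thus forms an implicit singly linked free list threaded through object indices. A user-space toy of the same idea (NUM and the BUFCTL_END value are made up for the toy):

#include <stdio.h>

#define NUM        5
#define BUFCTL_END 0xffffu   /* end-of-list sentinel (value made up) */

static unsigned short bufctl[NUM];
static unsigned int free_idx;    /* slabp->free: index of the first free object */

int main(void)
{
    unsigned int i;

    /* cache_init_objs: free objects are chained in array order */
    for (i = 0; i < NUM; i++)
        bufctl[i] = i + 1;
    bufctl[NUM - 1] = BUFCTL_END;
    free_idx = 0;

    /* slab_get_obj: repeatedly pop the head of the implicit free list */
    while (free_idx != BUFCTL_END) {
        unsigned int obj = free_idx;
        free_idx = bufctl[obj];      /* advance to the next free object */
        printf("allocated object %u\n", obj);
    }
    return 0;
}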

At this point the slab is fully initialized: its memory is allocated and all its fields are set. It is now linked onto this cache's three slab lists via "list_add_tail(&slabp->list, &(l3->slabs_free));", and the lists' free-object counter free_objects is updated; the function finally returns 1, meaning the slab was created successfully and the cache_grow call succeeded.

Back in cache_alloc_refill, this cache's array must be fetched again, because the operations above may have enabled interrupts, during which the local-cache pointer may have changed. Then comes the final check if (!ac->avail): if some other context has meanwhile filled this cache's array, the function can return directly; in most cases avail is still 0, so it jumps back to the label retry. This time the three slab lists are guaranteed to hold a free slab, so the while loop will execute "ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, node);", handing objects from the slab over to the array:

static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
                    int nodeid)
{
    /* take a free object; the free member is the index of the first
     * free object in this slab */
    void *objp = index_to_obj(cachep, slabp, slabp->free);
    kmem_bufctl_t next;

    /* update the in-use object count */
    slabp->inuse++;
    /* index of the next free object */
    next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
    slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
    WARN_ON(slabp->nodeid != nodeid);
#endif
    /* free now points to the next free object */
    slabp->free = next;

    return objp;
}

The function itself is straightforward: it keeps extracting objects from this slab on the three slab lists, updating the slab's first-free-object index and in-use count as it goes. Note that it is called from inside the while loop, which moves objects out in bulk: batchcount of them are handed to the array (never more than this cache's objects-per-slab count; under normal conditions that limit is not reached).

In the end this slab, previously fully free, has given up batchcount objects; depending on whether any objects remain, it is taken off the fully-free list of the three slab lists and put on the partial or the full list.

That is the process and principle of slab allocation!