因为free源代码和malloc源码都是在malloc.c定义的,所以用到的宏几乎差不多这里就不列出来了,详情见上一篇文章:malloc关键源码分析
__libc_free
同malloc一样我们使用的free函数也是封装过的
void
__libc_free (void *mem)
{
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
void (*hook) (void *, const void *)
= atomic_forced_read (__free_hook); //检查钩子函数
if (__builtin_expect (hook != NULL, 0))
{
(*hook)(mem, RETURN_ADDRESS (0));
return;
}
if (mem == 0) //free(0) 直接退出函数 /* free(0) has no effect */
return;
p = mem2chunk (mem); //转换为对应chunk指针
/*如果该chunk是mmaped,那么将会使用munmap(p)方案*/
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
/* see if the dynamic brk/mmap threshold needs adjusting */
if (!mp_.no_dyn_threshold
&& p->size > mp_.mmap_threshold
&& p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
{
mp_.mmap_threshold = chunksize (p);
mp_.trim_threshold = 2 * mp_.mmap_threshold;
LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
mp_.mmap_threshold, mp_.trim_threshold);
}
munmap_chunk (p); //释放
return;
}
如果不是mmapped chunk则继续执行下面的流程(再次列出__libc_free,省略号处即上文已分析的mmapped分支):
void
__libc_free (void *mem)
{
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
void (*hook) (void *, const void *)
= atomic_forced_read (__free_hook); //检查钩子函数
if (__builtin_expect (hook != NULL, 0))
{
(*hook)(mem, RETURN_ADDRESS (0));
return;
}
if (mem == 0) //free(0) 直接退出函数 /* free(0) has no effect */
return;
p = mem2chunk (mem); //转换为对应chunk指针
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
....
...
..
}
ar_ptr = arena_for_chunk (p); //获取对应arena指针
_int_free (ar_ptr, p, 0); //执行真正的释放函数
}
所以__libc_free 检查了:
- 钩子函数(__free_hook,非空则调用钩子后直接返回)
- 是否为mmapped chunk(是则按需调整动态阈值后用munmap释放)
_int_free
数据声明
static void _int_free(mstate av, mchunkptr p, int have_lock) {
INTERNAL_SIZE_T size; /* its size */
mfastbinptr * fb; /* associated fastbin */
mchunkptr nextchunk; /* next contiguous chunk */
INTERNAL_SIZE_T nextsize; /* its size */
int nextinuse; /* true if nextchunk is used */
INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
mchunkptr bck; /* misc temp for linking */
mchunkptr fwd; /* misc temp for linking */
const char *errstr = NULL;
int locked = 0;
size = chunksize(p); //获取chunk p的size
检查chunk是否合理
/* Little security check which won't hurt performance: the
allocator never wrapps around at the end of the address space.
Therefore we can exclude some size values which might appear
here by accident or by "design" from some intruder. */
/*typedef unsigned long int uintptr_t;适配*/
/*1:p指向合法。2:p的大小对齐*/
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
|| __builtin_expect (misaligned_chunk (p), 0)) //-size进行unsigned long int强制转换后是一个0xffff开头的数,如在64位下-0x90=====>0xffffffffffffff70
{ //0x602000这是其chunk的地址
errstr = "free(): invalid pointer";
errout:
if (!have_lock && locked)
(void) mutex_unlock (&av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (p), av);
return;
}
/* We know that each chunk is at least MINSIZE bytes in size or a
multiple of MALLOC_ALIGNMENT. */
/*1:其size至少大于最小size 2:对齐*/
if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
{
errstr = "free(): invalid size";
goto errout;
}
- p指向地址合法
- size对齐且大于等于MINSIZE(size < MINSIZE 即报错)
看是否可以放入fastbins
/*
If eligible, place chunk on a fastbin so it can be found
and used quickly in malloc.
*/
/*如果size小于等于最大fastbins,则可以进行下面的操作*/
if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
//默认 #define TRIM_FASTBINS 0,因此默认情况下下面的语句不会执行
#if TRIM_FASTBINS
/*
If TRIM_FASTBINS set, don't place chunks
bordering top into fastbins
*/
&& (chunk_at_offset(p, size) != av->top)
#endif
) {
放入fastbins
物理相邻下一个chunk进行大小检查
/*1:物理相邻下一个chunk不能过小,小于等于2 * SIZE_SZ 2:以及不能过大,大于system_mem*/ //否则报错
if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
/* We might not have a lock at this point and concurrent modifications
of system_mem might have let to a false positive. Redo the test
after getting the lock. */
if (have_lock
|| ({ assert (locked == 0);
mutex_lock(&av->mutex);
locked = 1;
chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
|| chunksize (chunk_at_offset (p, size)) >= av->system_mem;
}))
{
errstr = "free(): invalid next size (fast)";
goto errout;
}
if (! have_lock)
{
(void)mutex_unlock(&av->mutex);
locked = 0;
}
}
检查:
- 物理相邻下一个chunk的size 大于 2 * SIZE_SZ,且 小于 av->system_mem(否则报 "free(): invalid next size (fast)")
插入对应链表
/*调用perturb_byte来将data区的数据覆盖,但得看perturb_byte,一般是0,也就是不会进行memest*/
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
/*
static int perturb_byte;
....
...
static void
free_perturb (char *p, size_t n)
{
if (__glibc_unlikely (perturb_byte))
memset (p, perturb_byte, n);
}
*/
set_fastchunks(av);
unsigned int idx = fastbin_index(size);
/*mfastbinptr *fb 是一个二级指针*/
fb = &fastbin (av, idx); //获取对应下标表头地址
/* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
/*P->FD = *FB; *FB = P看这里就行下面两句看不懂 这里链接仅仅用了fd字段*/
mchunkptr old = *fb, old2;
unsigned int old_idx = ~0u;
do
{
/* Check that the top of the bin is not the record we are going to add
(i.e., double free). */
/*原来表头fd所放的就是old,这里是防止连续两次free同一个chunk*/
if (__builtin_expect (old == p, 0))
{
errstr = "double free or corruption (fasttop)";
goto errout;
}
/* Check that size of fastbin chunk at the top is the same as
size of the chunk that we are adding. We can dereference OLD
only if we have the lock, otherwise it might have already been
deallocated. See use of OLD_IDX below for the actual check. */
if (have_lock && old != NULL)
old_idx = fastbin_index(chunksize(old));
p->fd = old2 = old;
}
while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
{
errstr = "invalid fastbin entry (free)";
goto errout;
}
}
要点:
- 插入链表时,仅用fd构成单向链表
- 检查将要插入的chunk是否与fastbin表头chunk(即上一次free进来的chunk)相同,防止对同一chunk连续两次free(double free检测)
不能放入fastbins就进行合并操作
如果不是mmapped chunk,则加锁后进行合并;如果是mmapped chunk,则走末尾的else分支直接munmap释放:
/*
Consolidate other non-mmapped chunks as they arrive 不能放入fastbins就进行合并操作.
*/
else if (!chunk_is_mmapped(p)) {
if (! have_lock) {
(void)mutex_lock(&av->mutex);
locked = 1;
}
.....
...
..
/*
If the chunk was allocated via mmap, release via munmap().
*/
else {
munmap_chunk (p); //直接用munmap 释放
}
}
常规chunk,进行合并
nextchunk = chunk_at_offset(p, size); //获取p 物理相邻下一个chunk地址
/* Lightweight tests: check whether the block is already the
top block. 轻量级检查是不是top chunk */
if (__glibc_unlikely (p == av->top))
{
errstr = "double free or corruption (top)";
goto errout;
}
/* Or whether the next chunk is beyond the boundaries of the arena. nextchunk不能超过arena区域*/
if (__builtin_expect (contiguous (av)
&& (char *) nextchunk
>= ((char *) av->top + chunksize(av->top)), 0)) //这里通过top计算出了heap的最高地址
{
errstr = "double free or corruption (out)";
goto errout;
}
/* Or whether the block is actually not marked used. 检查nextchunk的inuse位 */
if (__glibc_unlikely (!prev_inuse(nextchunk)))
{
errstr = "double free or corruption (!prev)";
goto errout;
}
nextsize = chunksize(nextchunk);
/*1:下一个chunksize要大于2*SIZE_SZ 2:但不能大于av->system_mem*/
if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
|| __builtin_expect (nextsize >= av->system_mem, 0))
{
errstr = "free(): invalid next size (normal)";
goto errout;
}
/*数据清理,但一般不会执行*/
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
/* consolidate backward 低地址合并 */
if (!prev_inuse(p)) {
prevsize = p->prev_size; //物理相邻前一个chunk 大小
size += prevsize; //计算合并后的总size
p = chunk_at_offset(p, -((long) prevsize)); //更新chunk 指针为低地址chunk的指针
unlink(av, p, bck, fwd); //对低地址chunk执行unlink
}
/*下一个chunk不是top*/
if (nextchunk != av->top) {
/* get and clear inuse bit */
nextinuse = inuse_bit_at_offset(nextchunk, nextsize); //获取nextchunk的物理相邻下一个chunk的inuse位
/* consolidate forward 如果nextchunk也是freed 进行向后合并,注意这里没更新指针p*/
if (!nextinuse) {
unlink(av, nextchunk, bck, fwd); //对nextchunk进行unlink
size += nextsize; //更新size
} else
/*否则:*/
clear_inuse_bit_at_offset(nextchunk, 0); //将nextchun的inuse位置零
/*
Place the chunk in unsorted chunk list. Chunks are
not placed into regular bins until after they have
been given one chance to be used in malloc.
处理完后,被放入unsortedbin
*/
bck = unsorted_chunks(av); //获取表头
fwd = bck->fd; //获取unsortedbin的第一个chunk
if (__glibc_unlikely (fwd->bk != bck))
{
errstr = "free(): corrupted unsorted chunks";
goto errout;
}
/*进行插入操作,插入作为第一个chunk*/
p->fd = fwd;
p->bk = bck;
if (!in_smallbin_range(size)) // 如果是 large chunk,那就设置nextsize指针字段为NULL。
{
p->fd_nextsize = NULL;
p->bk_nextsize = NULL;
}
bck->fd = p;
fwd->bk = p;
/*设置head,以及foot信息*/
set_head(p, size | PREV_INUSE);
set_foot(p, size);
check_free_chunk(av, p);
}
/*
If the chunk borders the current high end of memory,
consolidate into top
*/
/*如果nextchunk是top则直接与top合并*/
else {
size += nextsize;
set_head(p, size | PREV_INUSE); //设置inuse位
av->top = p; //p视为新top
check_chunk(av, p);
}
/*
If freeing a large space, consolidate possibly-surrounding
chunks. Then, if the total unused topmost memory exceeds trim
threshold, ask malloc_trim to reduce top.
Unless max_fast is 0, we don't know if there are fastbins
bordering top, so we cannot tell for sure whether threshold
has been reached unless fastbins are consolidated. But we
don't want to consolidate on each free. As a compromise,
consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
is reached.
*/
/*当合并后的size大于FASTBIN_CONSOLIDATION_THRESHOLD,就会将heap向系统返还*/
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
if (have_fastchunks(av)) // 如果有 fast chunk 就进行合并
malloc_consolidate(av);
if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
if ((unsigned long)(chunksize(av->top)) >=
(unsigned long)(mp_.trim_threshold)) // top chunk 大于当前的收缩阙值
systrim(mp_.top_pad, av);
#endif
} else {
/* Always try heap_trim(), even if the top chunk is not
large, because the corresponding heap might go away. */
heap_info *heap = heap_for_ptr(top(av));
assert(heap->ar_ptr == av);
heap_trim(heap, mp_.top_pad);
}
}
if (! have_lock) {
assert (locked);
(void)mutex_unlock(&av->mutex);
}
}
要点:
- nextchunk不能超过arena区域
- 通过nextchunk的PREV_INUSE位检查chunk p是否已被标记为空闲(double free检测)
- nextchunk的size要大于2*SIZE_SZ,且小于av->system_mem
- 低地址合并
- 高地址合并(nextchunk不是top)
- nextchunk是top直接与top合并
- 合并后插入unsortedbin
- 并设置head foot
- 如果合并后size大于FASTBIN_CONSOLIDATION_THRESHOLD则返还系统
总结
- __libc_free
- 检查hook,mmaped
- _int_free
- 如果chunk size <= get_max_fast(),且物理相邻下一个chunk的size 大于 2*SIZE_SZ 且 小于 av->system_mem,那么插入fastbins
- 插入链表时,仅用fd构成单向链表
- 检查最新插入的chunk与old_chunk是不是同一个
- 不能放入fastbins,就合并
- 检查nextchunk
- 低地址合并,高地址合并
- 插入unsortedbin
- (插入fastbins的条件:chunk size <= get_max_fast(),且物理相邻下一个chunk的size 大于 2*SIZE_SZ 且 小于 av->system_mem)