glibc-2.23 源码分析学习记录
-
环境准备
下载 glibc2.23 源码
sudo apt-get install glibc-source
sudo apt-get install libc6-dbg
sudo tar xf /usr/src/glibc/glibc-2.23.tar.xz
-
malloc
堆结构:
struct malloc_chunk { //如果前一块被释放,该处存储前一块的大小 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ //本块的大小,包含堆头 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ // 对 free chunk 生效 struct malloc_chunk* fd; /* double links -- used only if free. */ struct malloc_chunk* bk; // 对于 largebins 生效 /* Only used for large blocks: pointer to next larger size. */ struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */ struct malloc_chunk* bk_nextsize; };
标志位:|A|M|P|(A=NON_MAIN_ARENA,M=IS_MMAPPED,P=PREV_INUSE)
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ #define PREV_INUSE 0x1 /* extract inuse bit of previous chunk */ #define prev_inuse(p) ((p)->size & PREV_INUSE) /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ #define IS_MMAPPED 0x2 /* check for mmap()'ed chunk */ #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained from a non-main arena. This is only set immediately before handing the chunk to the user, if necessary. */ #define NON_MAIN_ARENA 0x4 /* check for chunk from non-main arena */ #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
P: 前一个堆块是否处于使用状态,使用为 1,被释放为 0(第一个堆块的 P 位为 1)
A: 该堆块是否来自非主分配区(non-main arena),由非主分配区分配时为 1,主分配区分配时为 0
分配区(malloc_state):
struct malloc_state { /* Serialize access. */ // 互斥锁 mutex_t mutex; /* Flags (formerly in max_fast). */ // 表示当前 arena 中是否存在 fastbin 或者是否连续等 int flags; /* Fastbins */ // 存放每个 fastbin 链表的头指针,最多支持 bin 的个数为 10 mfastbinptr fastbinsY[NFASTBINS]; /* Base of the topmost chunk -- not otherwise kept in a bin */ // top chunk 堆头 mchunkptr top; /* The remainder from the most recent split of a small request */ //指向上一个 chunk 分配出一个 small chunk 之后剩余的部分 mchunkptr last_remainder; /* Normal bins packed as described above */ // 存储 unsortedbin mchunkptr bins[NBINS * 2 - 2]; /* Bitmap of bins */ // 每一个 bit 表示对应的 bin 中是否存在空闲 chunk unsigned int binmap[BINMAPSIZE]; /* Linked list */ struct malloc_state *next; /* Linked list for free arenas. Access to this field is serialized by free_list_lock in arena.c. */ struct malloc_state *next_free; /* Number of threads attached to this arena. 0 if the arena is on the free list. Access to this field is serialized by free_list_lock in arena.c. */ INTERNAL_SIZE_T attached_threads; /* Memory allocated from the system in this arena. */ // 当前内存分配量 INTERNAL_SIZE_T system_mem; INTERNAL_SIZE_T max_system_mem; };
主分配区和子分配区形成一个循环链表,每一个线程都存在一个私有的变量存放分配区指针,在分配内存的时候,查看哪个分配区没有上锁,就在哪个分配区分配内存,如果分配区全部被占用,则建立一个新的分配区。
具体的分配方式:
/* offset 2 to use otherwise unindexable first 2 bins */ // SIZE_SZ 64 /32 无符号整数 #define fastbin_index(sz) \ ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) // 减 2 是因为 chunk_size 在 64 位是从 0x20 开始,32 位从 0x10 开始 // MAX_FAST_SIZE 64=>160 字节,32=>80 字节 /* The maximum fastbin request size we support */ #define MAX_FAST_SIZE (80 * SIZE_SZ / 4) #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
malloc 流程:
void * __libc_malloc (size_t bytes) { // 可用的分配区 mstate ar_ptr; void *victim; // 读取 malloc_hook void *(*hook) (size_t, const void *) = atomic_forced_read (__malloc_hook); // 查看是否设置 malloc_hook if (__builtin_expect (hook != NULL, 0)) // 若被设置则直接调用,常用的将 malloc_hook 改为 system 就发生在这里 return (*hook)(bytes, RETURN_ADDRESS (0)); // 调用 arena_get 获取一个可用的分配区 arena_get (ar_ptr, bytes); // 调用 _int_malloc 函数分配内存 victim = _int_malloc (ar_ptr, bytes); /* Retry with another arena only if we were able to find a usable arena before. */ //如果 chunk 为空,但是 分配区指针不为空,再次调用 _int_malloc 获取 if (!victim && ar_ptr != NULL) { LIBC_PROBE (memory_malloc_retry, 1, bytes); ar_ptr = arena_get_retry (ar_ptr, bytes); victim = _int_malloc (ar_ptr, bytes); } // 给内存分配区 解锁 if (ar_ptr != NULL) (void) mutex_unlock (&ar_ptr->mutex); // 对分配的 chunk 指针进行地址检查 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || ar_ptr == arena_for_chunk (mem2chunk (victim))); return victim; }
_int_malloc
static void * _int_malloc (mstate av, size_t bytes) { INTERNAL_SIZE_T nb; /* normalized request size */ unsigned int idx; /* associated bin index */ mbinptr bin; /* associated bin */ mchunkptr victim; /* inspected/selected chunk */ INTERNAL_SIZE_T size; /* its size */ int victim_index; /* its bin index */ mchunkptr remainder; /* remainder from a split */ unsigned long remainder_size; /* its size */ unsigned int block; /* bit map traverser */ unsigned int bit; /* bit map traverser */ unsigned int map; /* current word of binmap */ mchunkptr fwd; /* misc temp for linking */ mchunkptr bck; /* misc temp for linking */ const char *errstr = NULL; /* Convert request size to internal form by adding SIZE_SZ bytes overhead plus possibly more to obtain necessary alignment and/or to obtain a size of at least MINSIZE, the smallest allocatable size. Also, checked_request2size traps (returning 0) request sizes that are so large that they wrap around zero when padded and aligned. */ // 将用户请求的 size 对其,并判断传入的参数是否符合要求 checked_request2size (bytes, nb); /* There are no usable arenas. Fall back to sysmalloc to get a chunk from mmap. */ if (__glibc_unlikely (av == NULL)) { // 如果没有能用的 malloc,用 sysmalloc 分配新的堆区 void *p = sysmalloc (nb, av); if (p != NULL) alloc_perturb (p, bytes); return p; }
接着是从 fastbin 中找可用的堆块
// 判断大小是否在 fastbin 的范围 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ())) { // 根据 nb 获取 fastbins 数组的下标 idx = fastbin_index (nb); // 得到链表头指针 mfastbinptr *fb = &fastbin (av, idx); mchunkptr pp = *fb; // 如果当前链表存在 chunk,则分配该链表的第一个 chunk do { victim = pp; if (victim == NULL) break; } while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) != victim); // 如果找到了能用的 chunk,还需要检查所分配的 chunk 的 size 与所在链表的 size if (victim != 0) { if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0)) { errstr = "malloc(): memory corruption (fast)"; errout: malloc_printerr (check_action, errstr, chunk2mem (victim), av); return NULL; } // 对 chunk 结构体的标志位进行检查,主要检查该 chunk 是否是 malloc_state 所表示的分配区内,包括是否重复分配和一些大小和地址的检查 check_remalloced_chunk (av, victim, nb); //根据 chunk 得到用户数据指针 p,并返回 void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } }
如果分配出错,会调用 malloc_printerr 打印错误信息
malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr) { /* Avoid using this arena in future. We do not attempt to synchronize this with anything else because we minimally want to ensure that __libc_message gets its resources safely without stumbling on the current corruption. */ if (ar_ptr) set_arena_corrupt (ar_ptr); if ((action & 5) == 5) __libc_message (action & 2, "%s\n", str); else if (action & 1) { char buf[2 * sizeof (uintptr_t) + 1]; buf[sizeof (buf) - 1] = '\0'; char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0); while (cp > buf) *--cp = '0'; __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n", __libc_argv[0] ? : "<unknown>", str, cp); } else if (action & 2) abort (); }
在 2.23 中,abort 会调用 fflush(实际走到 _IO_flush_all_lockp 刷新所有流),从而调用 _IO_FILE_plus.vtable 中的 _IO_overflow(这就是 FSOP 的原理)
继续看 small bin 的情况:
// 范围属于 small bin if (in_smallbin_range (nb)) { // 根据要分配的 chunk size 获得 small bin 数组的下标 idx = smallbin_index (nb); // 得到 small bin 的链表头地址 bin = bin_at (av, idx); // 这里会获取 small bin 的最后一个 chunk,如果 victim = bin,说明 small bin 是空的 if ((victim = last (bin)) != bin) { // 合并 fastbin 到 unsortedin if (victim == 0) /* initialization check */ malloc_consolidate (av); else { // 获取 small bin 的倒数第二个 chunk bck = victim->bk; // 检查倒数第二个 chunk 的 fd 指针是否指向最后一个 chunk,防止伪造 if (__glibc_unlikely (bck->fd != victim)) { errstr = "malloc(): smallbin double linked list corrupted"; goto errout; } // 设置 victim 的下一个 chunk 的 inuse 位 set_inuse_bit_at_offset (victim, nb); // 将最后一个 chunk 从 small bin 中解链 bin->bk = bck; bck->fd = bin; // 如果不是主线程则要设置 A 标志位 if (av != &main_arena) victim->size |= NON_MAIN_ARENA; check_malloced_chunk (av, victim, nb); void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } } }
当 small bin 没有初始化时,所有指针均为空,这时程序会调用 malloc_consolidate 将 fastbin 链表中的所有 chunk 合并并插入到 unsortedbin 中。
如果申请大小不在 small bin 范围,则计算对应 large bin 的下标,并在存在 fastbin 时先调用 malloc_consolidate 合并 fastbin(真正从 large bin 中分配在后面的流程进行)
else { idx = largebin_index (nb); // 合并 fastbin 到 unsortedbin if (have_fastchunks (av)) malloc_consolidate (av); }
经历过上述分支后,进入 unsorted bin,会从 last remainder chunk,large bins 和 top chunk 中分配所需的 chunk。
大致流程如下:
1. 查找 unsortedbin 是否为空
2. 当 unsortedbin 中只有一个 chunk 且为 last_remainder 时,进行 last_remainder 切分并直接返回。
3. 如果有多个 chunk
   1. 先将 chunk 从 unsortedbin 中解链,如果大小满足就直接返回
   2. 如果这个 chunk 大小不满足要求,先后判断是否满足 small bin 和 large bin 的大小,并分别插入对应的 bin 中
   3. 处理下一个 chunk
// 开始处理 unsortedbin for (;; ) { int iters = 0; // 如果 unsortedbin 不为空 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av)) { bck = victim->bk; // 判断当前申请的 size 是否合法,大于 2*SIZE_SZ,小于 av->system_mem if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0) || __builtin_expect (victim->size > av->system_mem, 0)) malloc_printerr (check_action, "malloc(): memory corruption", chunk2mem (victim), av); // 合法则获取 size size = chunksize (victim); /* If a small request, try to use last remainder if it is the only chunk in unsorted bin. This helps promote locality for runs of consecutive small requests. This is the only exception to best-fit, and applies only when there is no exact fit for a small chunk. */ // 如果申请的大小在 small bin 的范围中,且 unsorted bin 链表中只有一个 chunk 并指向 last_remainder,且该 chunk 的 size 大小大于用户申请的 size 大小 if (in_smallbin_range (nb) && bck == unsorted_chunks (av) && victim == av->last_remainder && (unsigned long) (size) > (unsigned long) (nb + MINSIZE)) { /* split and reattach remainder */ // 对该 chunk 进行拆分,获取剩下的 last_remainder 的大小和开始地址 remainder_size = size - nb; remainder = chunk_at_offset (victim, nb); // 将 unsorted bin 的 bk 指针指向更新的 remainder unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder; // 更新分配区 last_remainder 的值 av->last_remainder = remainder; // reaminder 的 bk 和 fd 都设置为 unsorted bin remainder->bk = remainder->fd = unsorted_chunks (av); // 如果 remainder_size 的大小不属于 small_bin,则需要设置 nextsize if (!in_smallbin_range (remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } // 设置堆头标志位 set_head (victim, nb | PREV_INUSE | (av != &main_arena ? 
NON_MAIN_ARENA : 0)); set_head (remainder, remainder_size | PREV_INUSE); set_foot (remainder, remainder_size); // 检查分配的 chunk check_malloced_chunk (av, victim, nb); void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } /* remove from unsorted list */ // 当前的 chunk 要被分配了,需要从 unsortedbin 中解除 // 如果 av 的 bk 可控,就能向 bk 的fd 之阵中写入 av 的值(unsortedbin attack) unsorted_chunks (av)->bk = bck; bck->fd = unsorted_chunks (av); /* Take now instead of binning if exact fit */ // 如果用户申请的 size 刚好与当前解链的 chunk 大小相同,则直接返回 if (size == nb) { set_inuse_bit_at_offset (victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; check_malloced_chunk (av, victim, nb); void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } /* place chunk in bin */ // 如果分出来的这个 chunk 大小属于 small bin,则放到 small bin 中,否则插入 large bin 中 if (in_smallbin_range (size)) { victim_index = smallbin_index (size); bck = bin_at (av, victim_index); fwd = bck->fd; } else { victim_index = largebin_index (size); bck = bin_at (av, victim_index); fwd = bck->fd; /* maintain large bins in sorted order */ // largebin 非空 if (fwd != bck) { /* Or with inuse bit to speed comparisons */ // 去除 p 标志位 size |= PREV_INUSE; /* if smaller than smallest, bypass loop below */ // 检查 A 标志位 assert ((bck->bk->size & NON_MAIN_ARENA) == 0); // 如果 chunk 的 size 比 large bins 的最小堆块还小 if ((unsigned long) (size) < (unsigned long) (bck->bk->size)) { // 指向链表的最后一个堆块 fwd = bck; bck = bck->bk; // 则将当前的 chunk 插入到 large bin 的最后一块 victim->fd_nextsize = fwd->fd; victim->bk_nextsize = fwd->fd->bk_nextsize; fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; } // 否则需要插入到合适的位置 else { assert ((fwd->size & NON_MAIN_ARENA) == 0); // 遍历查找合适的位置 while ((unsigned long) size < fwd->size) { fwd = fwd->fd_nextsize; assert ((fwd->size & NON_MAIN_ARENA) == 0); } // 要求 fwd 指向的大小要小于等于当前的 chunk if ((unsigned long) size == (unsigned long) fwd->size) /* Always insert in the second position. 
*/ fwd = fwd->fd; else { victim->fd_nextsize = fwd; victim->bk_nextsize = fwd->bk_nextsize; fwd->bk_nextsize = victim; victim->bk_nextsize->fd_nextsize = victim; } bck = fwd->bk; } } // 若 large bin 为空则直接插入即可 else victim->fd_nextsize = victim->bk_nextsize = victim; } // 还需要在大小相同的 largebin chunk 组成的数组中进行插入 mark_bin (av, victim_index); victim->bk = bck; victim->fd = fwd; fwd->bk = victim; bck->fd = victim; #define MAX_ITERS 10000 if (++iters >= MAX_ITERS) break; }
如果 unsorted bin 中没有找到合适的 chunk,就会遍历查找请求的 size 对应 bin 的 chunk,这一段是 large bin 的范围
if (!in_smallbin_range (nb)) { bin = bin_at (av, idx); /* skip scan if empty or largest chunk is too small */ // 如果用户请求的 size 小于 large bin 中最大的 chunk size if ((victim = first (bin)) != bin && (unsigned long) (victim->size) >= (unsigned long) (nb)) { // 获取一个刚好小于用户请求的 size 的 large bin victim = victim->bk_nextsize; while (((unsigned long) (size = chunksize (victim)) < (unsigned long) (nb))) victim = victim->bk_nextsize; /* Avoid removing the first entry for a size so that the skip list does not have to be rerouted. */ if (victim != last (bin) && victim->size == victim->fd->size) victim = victim->fd; // 把这个 large bin chunk 进行拆分 remainder_size = size - nb; // 解链 unlink (av, victim, bck, fwd); /* Exhaust */ // 处理剩余部分 if (remainder_size < MINSIZE) { set_inuse_bit_at_offset (victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; } /* Split */ else { // 将剩余部分 chunk 插入 unsortedbin 中 remainder = chunk_at_offset (victim, nb); /* We cannot assume the unsorted list is empty and therefore have to perform a complete insert here. */ bck = unsorted_chunks (av); fwd = bck->fd; if (__glibc_unlikely (fwd->bk != bck)) { errstr = "malloc(): corrupted unsorted chunks"; goto errout; } remainder->bk = bck; remainder->fd = fwd; bck->fd = remainder; fwd->bk = remainder; // remainder 不满足 small bin if (!in_smallbin_range (remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } // 设置分配的 chunk 堆头 set_head (victim, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); set_head (remainder, remainder_size | PREV_INUSE); set_foot (remainder, remainder_size); } check_malloced_chunk (av, victim, nb); void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } }
如果请求大小对应的 large bin 中也没有符合要求的 chunk,则借助 binmap 在更大的非空 bin 中继续查找
++idx; bin = bin_at (av, idx); block = idx2block (idx); map = av->binmap[block]; bit = idx2bit (idx); for (;; ) { /* Skip rest of block if there are no more set bits in this block. */ // 找到存在空闲 chunk 的 large bin 链表 if (bit > map || bit == 0) { do { if (++block >= BINMAPSIZE) /* out of bins */ goto use_top; } while ((map = av->binmap[block]) == 0); bin = bin_at (av, (block << BINMAPSHIFT)); bit = 1; } /* Advance to bin with set bit. There must be one. */ // 在 block 中找到存在符合要求的 large bin 链表头指针 while ((bit & map) == 0) { bin = next_bin (bin); bit <<= 1; assert (bit != 0); } /* Inspect the bin. It is likely to be non-empty */ victim = last (bin); /* If a false alarm (empty bin), clear the bit. */ // 链表非空 if (victim == bin) { av->binmap[block] = map &= ~bit; /* Write through */ bin = next_bin (bin); bit <<= 1; } // 找到第一个大于申请 size 的 chunk 进行堆块划分 // 具体操作和上面的类似 else { size = chunksize (victim); /* We know the first chunk in this bin is big enough to use. */ assert ((unsigned long) (size) >= (unsigned long) (nb)); remainder_size = size - nb; /* unlink */ unlink (av, victim, bck, fwd); /* Exhaust */ if (remainder_size < MINSIZE) { set_inuse_bit_at_offset (victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; } /* Split */ else { remainder = chunk_at_offset (victim, nb); /* We cannot assume the unsorted list is empty and therefore have to perform a complete insert here. */ bck = unsorted_chunks (av); fwd = bck->fd; if (__glibc_unlikely (fwd->bk != bck)) { errstr = "malloc(): corrupted unsorted chunks 2"; goto errout; } remainder->bk = bck; remainder->fd = fwd; bck->fd = remainder; fwd->bk = remainder; /* advertise as last remainder */ if (in_smallbin_range (nb)) av->last_remainder = remainder; if (!in_smallbin_range (remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } set_head (victim, nb | PREV_INUSE | (av != &main_arena ? 
NON_MAIN_ARENA : 0)); set_head (remainder, remainder_size | PREV_INUSE); set_foot (remainder, remainder_size); } check_malloced_chunk (av, victim, nb); void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } }
如果在更大的 bin 中也没有找到相应的 chunk,则需要从 top chunk 中切分
use_top: /* If large enough, split off the chunk bordering the end of memory (held in av->top). Note that this is in accord with the best-fit search rule. In effect, av->top is treated as larger (and thus less well fitting) than any other available chunk since it can be extended to be as large as necessary (up to system limitations). We require that av->top always exists (i.e., has size >= MINSIZE) after initialization, so if it would otherwise be exhausted by current request, it is replenished. (The main reason for ensuring it exists is that we may need MINSIZE space to put in fenceposts in sysmalloc.) */ // 获取 top chunk 的堆头地址和大小 victim = av->top; size = chunksize (victim); // 如果 top chunk 的 size 满足用户请求的 size if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) { // 将 top chunk 进行划分 remainder_size = size - nb; remainder = chunk_at_offset (victim, nb); av->top = remainder; set_head (victim, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); set_head (remainder, remainder_size | PREV_INUSE); check_malloced_chunk (av, victim, nb); void *p = chunk2mem (victim); alloc_perturb (p, bytes); return p; } /* When we are using atomic ops to free fast chunks we can get here for all block sizes. */ // 如果 top chunk 不够,则合并 fastbin 到 small 或 large bin 中 else if (have_fastchunks (av)) { malloc_consolidate (av); /* restore original bin index */ if (in_smallbin_range (nb)) idx = smallbin_index (nb); else idx = largebin_index (nb); } /* Otherwise, relay to handle system-dependent cases */ // 如果还不够,则调用 sysmalloc 继续分配堆块 else { void *p = sysmalloc (nb, av); if (p != NULL) alloc_perturb (p, bytes); return p; } } }
-
free
__libc_free
void __libc_free (void *mem) { mstate ar_ptr; mchunkptr p; /* chunk corresponding to mem */ // 判断是否设置了 free_hook void (*hook) (void *, const void *) = atomic_forced_read (__free_hook); if (__builtin_expect (hook != NULL, 0)) { // 设置了就去执行 (*hook)(mem, RETURN_ADDRESS (0)); return; } if (mem == 0) /* free(0) has no effect */ return; // 将用户提供的 mem 指针转换为堆块头指针 p = mem2chunk (mem); // 判断 chunk 是否由 mmap 分配 if (chunk_is_mmapped (p)) /* release mmapped memory. */ { /* see if the dynamic brk/mmap threshold needs adjusting */ // 如果是 mmap,则首先更新 mmap 分配和收缩阈值 if (!mp_.no_dyn_threshold && p->size > mp_.mmap_threshold && p->size <= DEFAULT_MMAP_THRESHOLD_MAX) { mp_.mmap_threshold = chunksize (p); mp_.trim_threshold = 2 * mp_.mmap_threshold; LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2, mp_.mmap_threshold, mp_.trim_threshold); } // 释放空间 munmap_chunk (p); return; } // 如果不是 mmap 创建,则调用 _int_free 函数 ar_ptr = arena_for_chunk (p); _int_free (ar_ptr, p, 0); }
_int_free
_int_free (mstate av, mchunkptr p, int have_lock) { INTERNAL_SIZE_T size; /* its size */ mfastbinptr *fb; /* associated fastbin */ mchunkptr nextchunk; /* next contiguous chunk */ INTERNAL_SIZE_T nextsize; /* its size */ int nextinuse; /* true if nextchunk is used */ INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ mchunkptr bck; /* misc temp for linking */ mchunkptr fwd; /* misc temp for linking */ const char *errstr = NULL; int locked = 0; size = chunksize (p); /* Little security check which won't hurt performance: the allocator never wrapps around at the end of the address space. Therefore we can exclude some size values which might appear here by accident or by "design" from some intruder. */ if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) || __builtin_expect (misaligned_chunk (p), 0)) { errstr = "free(): invalid pointer"; errout: if (!have_lock && locked) (void) mutex_unlock (&av->mutex); malloc_printerr (check_action, errstr, chunk2mem (p), av); return; } /* We know that each chunk is at least MINSIZE bytes in size or a multiple of MALLOC_ALIGNMENT. */ // 检查 size 是否大于 MINSIZE,是否对其 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size))) { errstr = "free(): invalid size"; goto errout; } // 检查 chunk 是否在使用 check_inuse_chunk(av, p);
进行一系列检查后,进入 fastbin 的判断,如果大小合适就把这个被释放的 chunk 放入对应的 fastbin 链表中。
// 检查大小是不是属于 fastbin 块 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ()) // 如果始于 fastbin 但与 top chunk 相邻,不会现在就合并到 top chunk #if TRIM_FASTBINS /* If TRIM_FASTBINS set, don't place chunks bordering top into fastbins */ && (chunk_at_offset(p, size) != av->top) #endif ) { if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0) || __builtin_expect (chunksize (chunk_at_offset (p, size)) >= av->system_mem, 0)) { /* We might not have a lock at this point and concurrent modifications of system_mem might have let to a false positive. Redo the test after getting the lock. */ if (have_lock || ({ assert (locked == 0); mutex_lock(&av->mutex); locked = 1; chunk_at_offset (p, size)->size <= 2 * SIZE_SZ || chunksize (chunk_at_offset (p, size)) >= av->system_mem; })) { errstr = "free(): invalid next size (fast)"; goto errout; } if (! have_lock) { (void)mutex_unlock(&av->mutex); locked = 0; } } // 清理 fastbin 中的数据 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ); // 初始化 fastbin set_fastchunks(av); // 获取 size 对用的 fastbin 下标 unsigned int idx = fastbin_index(size); fb = &fastbin (av, idx); /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */ mchunkptr old = *fb, old2; unsigned int old_idx = ~0u; do { /* Check that the top of the bin is not the record we are going to add (i.e., double free). */ // 判断当前 fastbin 头 chunk 与要被释放的 chunk 是否相同(防止 double free) if (__builtin_expect (old == p, 0)) { errstr = "double free or corruption (fasttop)"; goto errout; } /* Check that size of fastbin chunk at the top is the same as size of the chunk that we are adding. We can dereference OLD only if we have the lock, otherwise it might have already been deallocated. See use of OLD_IDX below for the actual check. 
*/ // 将要被释放的 chunk 放入 fastbin 头部,修改其 fd 指针指向 old if (have_lock && old != NULL) old_idx = fastbin_index(chunksize(old)); p->fd = old2 = old; } while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2); if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0)) { errstr = "invalid fastbin entry (free)"; goto errout; } }
如果大小不属于 fastbin 且不是 mmap 得到的 chunk,则尝试与相邻的空闲 chunk 合并,然后放入 unsortedbin 中
/* Consolidate other non-mmapped chunks as they arrive. */ else if (!chunk_is_mmapped(p)) { if (! have_lock) { (void)mutex_lock(&av->mutex); locked = 1; } // 获取 next_chunk nextchunk = chunk_at_offset(p, size); /* Lightweight tests: check whether the block is already the top block. */ // 检查当前 chunk 是否是 top chunk 头(防止 double free) if (__glibc_unlikely (p == av->top)) { errstr = "double free or corruption (top)"; goto errout; } /* Or whether the next chunk is beyond the boundaries of the arena. */ // 判断 next chunk 是否超过分配区 if (__builtin_expect (contiguous (av) && (char *) nextchunk >= ((char *) av->top + chunksize(av->top)), 0)) { errstr = "double free or corruption (out)"; goto errout; } /* Or whether the block is actually not marked used. */ // 检查 next chunk 的 inuse 位,判断被释放的 chunk 是否在使用 if (__glibc_unlikely (!prev_inuse(nextchunk))) { errstr = "double free or corruption (!prev)"; goto errout; } // 判断 next chunk 的 size 是否合法 nextsize = chunksize(nextchunk); if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0) || __builtin_expect (nextsize >= av->system_mem, 0)) { errstr = "free(): invalid next size (normal)"; goto errout; } // 清除要被释放的 chunk 的内容 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ); /* consolidate backward */ // 如果前一个 prev chunk 也处于释放状态,则用 unlink 合并两个 chunk if (!prev_inuse(p)) { prevsize = p->prev_size; size += prevsize; p = chunk_at_offset(p, -((long) prevsize)); unlink(av, p, bck, fwd); } // 如果 next chunk 不是 top chunk if (nextchunk != av->top) { /* get and clear inuse bit */ nextinuse = inuse_bit_at_offset(nextchunk, nextsize); /* consolidate forward */ // 如果 next chunk 也处于释放中,则继续向下合并 next chunk if (!nextinuse) { unlink(av, nextchunk, bck, fwd); size += nextsize; } else // 清除 inuse 位 clear_inuse_bit_at_offset(nextchunk, 0); /* Place the chunk in unsorted chunk list. Chunks are not placed into regular bins until after they have been given one chance to be used in malloc. 
*/ // 获取当前 unsortedbin 的末尾 chunk 和链表头 chunk bck = unsorted_chunks(av); fwd = bck->fd; if (__glibc_unlikely (fwd->bk != bck)) { errstr = "free(): corrupted unsorted chunks"; goto errout; } // 将当前 chunk 插入 unsortedbin 中 p->fd = fwd; p->bk = bck; // 如果 size 不属于 small bin,需要设置 large bin if (!in_smallbin_range(size)) { p->fd_nextsize = NULL; p->bk_nextsize = NULL; } // 更新链表头尾指针 bck->fd = p; fwd->bk = p; // 设置 chunk 的头部和尾部 prev_size set_head(p, size | PREV_INUSE); set_foot(p, size); check_free_chunk(av, p); } /* If the chunk borders the current high end of memory, consolidate into top */ // 如果当前 chunk 临近 top chunk,则直接合并到 top chunk else { size += nextsize; set_head(p, size | PREV_INUSE); av->top = p; check_chunk(av, p); } /* If freeing a large space, consolidate possibly-surrounding chunks. Then, if the total unused topmost memory exceeds trim threshold, ask malloc_trim to reduce top. Unless max_fast is 0, we don't know if there are fastbins bordering top, so we cannot tell for sure whether threshold has been reached unless fastbins are consolidated. But we don't want to consolidate on each free. As a compromise, consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached. */ // 如果前面释放的 chunk 大小较大,将 fast bin 合并到 unsortedbin 中 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { if (have_fastchunks(av)) malloc_consolidate(av); // 如果进程的分配区是主分配区,调用 systrim 收缩内存,否则获取非主分配区的 heap_info,用 heap_trim 收缩 heap if (av == &main_arena) { #ifndef MORECORE_CANNOT_TRIM if ((unsigned long)(chunksize(av->top)) >= (unsigned long)(mp_.trim_threshold)) systrim(mp_.top_pad, av); #endif } else { /* Always try heap_trim(), even if the top chunk is not large, because the corresponding heap might go away. */ heap_info *heap = heap_for_ptr(top(av)); assert(heap->ar_ptr == av); heap_trim(heap, mp_.top_pad); } } if (! have_lock) { assert (locked); (void)mutex_unlock(&av->mutex); } } /* If the chunk was allocated via mmap, release via munmap(). 
*/ // 如果是 mmap 分配就调用 munmap 释放该 chunk else { munmap_chunk (p); } }
-
参考文献