free

HackTricks 지원

Free 주문 요약

(이 요약에서는 검사 사항이 설명되지 않으며 간결함을 위해 일부 사례가 생략되었습니다)

  1. 주소가 null이면 아무 작업도 수행하지 않습니다.

  2. 청크가 mmap된 경우, munmap하고 종료합니다.

  3. _int_free를 호출합니다:

    1. 가능한 경우, 청크를 tcache에 추가합니다.

    2. 가능한 경우, 청크를 fast bin에 추가합니다.

    3. 필요한 경우 _int_free_merge_chunk를 호출하여 청크를 통합하고 unsorted 목록에 추가합니다.

__libc_free

Free는 __libc_free를 호출합니다.

  • 전달된 주소가 Null(0)인 경우 아무 작업도 수행하지 않습니다.

  • 포인터 태그를 확인합니다.

  • 청크가 mmap된 경우, munmap하고 종료합니다.

  • 그렇지 않으면 메모리 태그(색상)를 복원하고 _int_free를 호출합니다.

<details>

<summary>__libc_free 코드</summary>

```c void __libc_free (void *mem) { mstate ar_ptr; mchunkptr p; /* chunk corresponding to mem */

if (mem == 0) /* free(0) has no effect */ return;

/* Quickly check that the freed pointer matches the tag for the memory. This gives a useful double-free detection. */ if (__glibc_unlikely (mtag_enabled)) *(volatile char *)mem;

int err = errno;

p = mem2chunk (mem);

if (chunk_is_mmapped (p)) /* release mmapped memory. / { / See if the dynamic brk/mmap threshold needs adjusting. Dumped fake mmapped chunks do not affect the threshold. */ if (!mp_.no_dyn_threshold && chunksize_nomask (p) > mp_.mmap_threshold && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX) { mp_.mmap_threshold = chunksize (p); mp_.trim_threshold = 2 * mp_.mmap_threshold; LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2, mp_.mmap_threshold, mp_.trim_threshold); } munmap_chunk (p); } else { MAYBE_INIT_TCACHE ();

/* Mark the chunk as belonging to the library again. */ (void)tag_region (chunk2mem (p), memsize (p));

ar_ptr = arena_for_chunk (p); _int_free (ar_ptr, p, 0); }

__set_errno (err); } libc_hidden_def (__libc_free)

</details>

## \_int\_free <a href="#int_free" id="int_free"></a>

### \_int\_free 시작 <a href="#int_free" id="int_free"></a>

일부 확인을 시작합니다:

* **포인터**가 **정렬**되어 있는지 확인하거나 오류 `free(): invalid pointer`를 발생시킵니다.
* **크기**가 최소값보다 작지 않고 **크기**가 또한 **정렬**되어 있는지 확인하거나 오류 `free(): invalid size`를 발생시킵니다.
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4493C1-L4513C28

#define aligned_OK(m) (((unsigned long) (m) & MALLOC_ALIGN_MASK) == 0)

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */

  size = chunksize (p);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");

  check_inuse_chunk(av, p);
```

### \_int\_free tcache

먼저 해당 청크를 관련 tcache에 할당하려고 시도합니다. 그러나 이전에 몇 가지 확인이 수행됩니다. 해제된 청크와 동일한 인덱스의 tcache의 모든 청크를 순환하며 다음을 수행합니다.

  • mp_.tcache_count보다 더 많은 항목이 있는 경우: free(): too many chunks detected in tcache

  • 항목이 정렬되어 있지 않은 경우: free(): unaligned chunk detected in tcache 2

  • 해제된 청크가 이미 해제되었고 tcache의 청크로 존재하는 경우: free(): double free detected in tcache 2

모든 것이 순조롭게 진행되면, 청크가 tcache에 추가되고 함수가 반환됩니다.

<details>

<summary>_int_free tcache 코드</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4515C1-L4554C7

#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
	/* Check to see if it's already in the tcache.  */
	tcache_entry *e = (tcache_entry *) chunk2mem (p);

	/* This test succeeds on double free.  However, we don't 100%
	   trust it (it also matches random payload data at a 1 in
	   2^<size_t> chance), so verify it's not an unlikely
	   coincidence before aborting.  */
	if (__glibc_unlikely (e->key == tcache_key))
	  {
	    tcache_entry *tmp;
	    size_t cnt = 0;
	    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
	    for (tmp = tcache->entries[tc_idx];
		 tmp;
		 tmp = REVEAL_PTR (tmp->next), ++cnt)
	      {
		if (cnt >= mp_.tcache_count)
		  malloc_printerr ("free(): too many chunks detected in tcache");
		if (__glibc_unlikely (!aligned_OK (tmp)))
		  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
		if (tmp == e)
		  malloc_printerr ("free(): double free detected in tcache 2");
		/* If we get here, it was a coincidence.  We've wasted a
		   few cycles, but don't abort.  */
	      }
	  }

	if (tcache->counts[tc_idx] < mp_.tcache_count)
	  {
	    tcache_put (p, tc_idx);
	    return;
	  }
      }
  }
#endif
```

</details>

### \_int\_free fast bin <a href="#int_free" id="int_free"></a>

먼저 크기가 fast bin에 적합한지 확인하고 최상단 청크에 가까이 설정할 수 있는지 확인합니다.

그런 다음 몇 가지 확인을 수행하면서 해제된 청크를 fast bin의 맨 위에 추가합니다:

- 청크의 크기가 잘못된 경우 (너무 크거나 작은 경우): `free(): invalid next size (fast)`
- 추가된 청크가 이미 fast bin의 맨 위에 있는 경우: `double free or corruption (fasttop)`
- 맨 위의 청크의 크기가 추가하려는 청크의 크기와 다른 경우: `invalid fastbin entry (free)`

<details>

<summary>_int_free Fast Bin</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4556C2-L4631C4

/*
If eligible, place chunk on a fastbin so it can be found
and used quickly in malloc.
*/

if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
/*
If TRIM_FASTBINS set, don't place chunks
bordering top into fastbins
*/
&& (chunk_at_offset(p, size) != av->top)
#endif
) {

if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
<= CHUNK_HDR_SZ, 0)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
bool fail = true;
/* We might not have a lock at this point and concurrent modifications
of system_mem might result in a false positive.  Redo the test after
getting the lock.  */
if (!have_lock)
{
__libc_lock_lock (av->mutex);
fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
|| chunksize (chunk_at_offset (p, size)) >= av->system_mem);
__libc_lock_unlock (av->mutex);
}

if (fail)
malloc_printerr ("free(): invalid next size (fast)");
}

free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

atomic_store_relaxed (&av->have_fastchunks, true);
unsigned int idx = fastbin_index(size);
fb = &fastbin (av, idx);

/* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
mchunkptr old = *fb, old2;

if (SINGLE_THREAD_P)
{
/* Check that the top of the bin is not the record we are going to
add (i.e., double free).  */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
p->fd = PROTECT_PTR (&p->fd, old);
*fb = p;
}
else
do
{
/* Check that the top of the bin is not the record we are going to
add (i.e., double free).  */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
old2 = old;
p->fd = PROTECT_PTR (&p->fd, old);
}
while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
!= old2);

/* Check that size of fastbin chunk at the top is the same as
size of the chunk that we are adding.  We can dereference OLD
only if we have the lock, otherwise it might have already been
allocated again.  */
if (have_lock && old != NULL
&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
malloc_printerr ("invalid fastbin entry (free)");
}
```

### \_int\_free 마무리

만약 청크가 아직 어떤 bin에 할당되지 않았다면, _int_free_merge_chunk를 호출합니다.

```c
/* Consolidate other non-mmapped chunks as they arrive.  */

else if (!chunk_is_mmapped(p)) {

  /* If we're single-threaded, don't lock the arena.  */
  if (SINGLE_THREAD_P)
    have_lock = true;

  if (!have_lock)
    __libc_lock_lock (av->mutex);

  _int_free_merge_chunk (av, p, size);

  if (!have_lock)
    __libc_lock_unlock (av->mutex);
}
/* If the chunk was allocated via mmap, release via munmap().  */

else {
  munmap_chunk (p);
}
}
```

</details>

## \_int\_free\_merge\_chunk

이 함수는 크기가 SIZE 바이트인 청크 P를 이웃한 청크와 병합하려고 시도합니다. 결과 청크를 정렬되지 않은 bin 목록에 넣습니다.

일부 확인이 수행됩니다:

* 청크가 최상위 청크인 경우: `double free or corruption (top)`
* 다음 청크가 아레나의 경계를 벗어난 경우: `double free or corruption (out)`
* 청크가 사용 중으로 표시되지 않은 경우 (`prev_inuse`에서 다음 청크): `double free or corruption (!prev)`
* 다음 청크의 크기가 너무 작거나 너무 큰 경우: `free(): invalid next size (normal)`
* 이전 청크가 사용 중이 아닌 경우, 병합을 시도합니다. 그러나, prev\_size가 이전 청크에서 지정된 크기와 다른 경우: `corrupted size vs. prev_size while consolidating`

<details>

<summary>_int_free_merge_chunk 코드</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4660C1-L4702C2

/* Try to merge chunk P of SIZE bytes with its neighbors.  Put the
resulting chunk on the appropriate bin list.  P must not be on a
bin list yet, and it can be in use.  */
static void
_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
{
mchunkptr nextchunk = chunk_at_offset(p, size);

/* Lightweight tests: check whether the block is already the
top block.  */
if (__glibc_unlikely (p == av->top))
malloc_printerr ("double free or corruption (top)");
/* Or whether the next chunk is beyond the boundaries of the arena.  */
if (__builtin_expect (contiguous (av)
&& (char *) nextchunk
>= ((char *) av->top + chunksize(av->top)), 0))
malloc_printerr ("double free or corruption (out)");
/* Or whether the block is actually not marked used.  */
if (__glibc_unlikely (!prev_inuse(nextchunk)))
malloc_printerr ("double free or corruption (!prev)");

INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
|| __builtin_expect (nextsize >= av->system_mem, 0))
malloc_printerr ("free(): invalid next size (normal)");

free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

/* Consolidate backward.  */
if (!prev_inuse(p))
{
INTERNAL_SIZE_T prevsize = prev_size (p);
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
if (__glibc_unlikely (chunksize(p) != prevsize))
malloc_printerr ("corrupted size vs. prev_size while consolidating");
unlink_chunk (av, p);
}

/* Write the chunk header, maybe after merging with the following chunk.  */
size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
_int_free_maybe_consolidate (av, size);
}
```
</details>

HackTricks 지원하기

Last updated