  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);
```
</details>
Note that it will always tag the returned pointer with `tag_new_usable`, from the code:
```c
/* Allocate a new random color and use it to color the user region of
   a chunk; this may include data from the subsequent chunk's header
   if tagging is sufficiently fine grained.  Returns PTR suitably
   recolored for accessing the memory there.  */
void *tag_new_usable (void *ptr)
```
`_int_malloc` is the function that performs the actual allocation using the other bins and the top chunk. It starts by declaring some variables and normalizing the requested size:

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3847
static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

#if USE_TCACHE
  size_t tcache_unsorted_count;     /* count of unsorted chunks processed */
#endif

  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size. Also, checked_request2size returns false for request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */

  nb = checked_request2size (bytes);
  if (nb == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
```
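One observable effect of this normalization is that the usable size of an allocation is usually larger than what was requested. The small standalone check below is just an illustration (exact values depend on the platform and glibc version); it uses the real `malloc_usable_size` API from `<malloc.h>`:

```c
/* Print requested vs. usable size: the usable size roughly reflects the
   normalized chunk size computed by checked_request2size, minus the
   chunk header.  Output values are platform/version dependent. */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  size_t requests[] = { 1, 24, 25, 100, 1000 };
  for (size_t i = 0; i < sizeof requests / sizeof requests[0]; i++)
    {
      void *p = malloc(requests[i]);
      if (p == NULL)
        return 1;
      printf("requested %4zu -> usable %4zu bytes\n",
             requests[i], malloc_usable_size(p));
      free(p);
    }
  return 0;
}
```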
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3885C3-L3893C6
  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }
```
If the requested size fits in a fast bin, the corresponding bin is checked first and, while there, other chunks of the same size are also stashed into the tcache:

<details>

<summary>_int_malloc fast bin</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6
  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast
     path.
   */

  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
    {
      idx = fastbin_index (nb);
      mfastbinptr *fb = &fastbin (av, idx);
      mchunkptr pp;
      victim = *fb;

      if (victim != NULL)
        {
          if (__glibc_unlikely (misaligned_chunk (victim)))
            malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");

          if (SINGLE_THREAD_P)
            *fb = REVEAL_PTR (victim->fd);
          else
            REMOVE_FB (fb, pp, victim);
          if (__glibc_likely (victim != NULL))
            {
              size_t victim_idx = fastbin_index (chunksize (victim));
              if (__builtin_expect (victim_idx != idx, 0))
                malloc_printerr ("malloc(): memory corruption (fast)");
              check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
              /* While we're here, if we see other chunks of the same size,
                 stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache != NULL && tc_idx < mp_.tcache_bins)
                {
                  mchunkptr tc_victim;

                  /* While bin not empty and tcache not full, copy chunks.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count
                         && (tc_victim = *fb) != NULL)
                    {
                      if (__glibc_unlikely (misaligned_chunk (tc_victim)))
                        malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
                      if (SINGLE_THREAD_P)
                        *fb = REVEAL_PTR (tc_victim->fd);
                      else
                        {
                          REMOVE_FB (fb, pp, tc_victim);
                          if (__glibc_unlikely (tc_victim == NULL))
                            break;
                        }
                      tcache_put (tc_victim, tc_idx);
                    }
                }
#endif
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }
```
</details>
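For reference, the bin index used in this path comes from the `fastbin_index` macro in malloc.c. The tiny standalone reproduction below (the `main` harness is just for illustration, assuming a 64-bit build where `SIZE_SZ` is 8) shows how chunk sizes map to fastbin indexes:

```c
/* Reproduce fastbin_index() from malloc.c for a 64-bit build (SIZE_SZ == 8):
   chunk sizes 0x20, 0x30, ..., 0x80 map to fastbin indexes 0..6. */
#include <stdio.h>

#define SIZE_SZ 8
#define fastbin_index(sz) ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)

int main(void)
{
  for (unsigned int chunk = 0x20; chunk <= 0x80; chunk += 0x10)
    printf("chunk size 0x%x -> fastbin index %u\n", chunk, fastbin_index(chunk));
  return 0;
}
```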
### Small Bin
A comment in the code indicates that, because small bins hold exactly one size per index, checking whether a valid chunk is available is very fast, so right after the fast bins the small bins are checked.

The first check is whether the requested size could fit in a small bin. In that case, the corresponding **index** inside the small bins is computed and it is checked whether there is **any available chunk**.

Then, a security check is performed which consists of:

* checking that `victim->bk->fd == victim`, i.e. that the two chunks are correctly linked.

If the check passes, the chunk **gets its `inuse` bit set**, the doubly linked list is fixed so the chunk disappears from it (as it is going to be used), and the non-main-arena bit is set if needed.

Finally, the **tcache index for the requested size is filled** with other chunks from inside the small bin (if any are present).
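To make the list manipulation concrete, here is a minimal, self-contained sketch (not glibc code; `fake_chunk` and `smallbin_take_last` are made-up names) of how the oldest chunk is taken from a small bin: the bin header acts as the sentinel of a circular doubly linked list, `victim->bk->fd == victim` is verified, and only then is the victim unlinked and returned.

```c
/* Toy model of the small-bin removal done in _int_malloc (see the real code
   below): take the last chunk, check the double linked list, unlink it. */
#include <stdio.h>
#include <stdlib.h>

struct fake_chunk {
  struct fake_chunk *fd;   /* next chunk, towards the bin head  */
  struct fake_chunk *bk;   /* previous chunk, towards the tail  */
  const char *name;
};

/* Take the last chunk out of the bin, mimicking the checks in _int_malloc. */
static struct fake_chunk *smallbin_take_last(struct fake_chunk *bin)
{
  struct fake_chunk *victim = bin->bk;          /* victim = last (bin)   */
  if (victim == bin)                            /* bin is empty          */
    return NULL;
  struct fake_chunk *bck = victim->bk;
  if (bck->fd != victim) {                      /* integrity check       */
    fprintf(stderr, "malloc(): smallbin double linked list corrupted\n");
    exit(1);
  }
  bin->bk = bck;                                /* unlink the victim     */
  bck->fd = bin;
  return victim;
}

int main(void)
{
  /* bin <-> b <-> a : "a" is the oldest chunk (tail), so it comes out first. */
  struct fake_chunk bin = { &bin, &bin, "bin" };
  struct fake_chunk a = { .name = "a" }, b = { .name = "b" };

  /* insert at the head, the way free() inserts into a small bin */
  a.fd = &bin; a.bk = &bin; bin.fd = &a; bin.bk = &a;
  b.fd = &a;   b.bk = &bin; a.bk = &b;   bin.fd = &b;

  printf("first victim: %s\n", smallbin_take_last(&bin)->name);  /* a */
  printf("second victim: %s\n", smallbin_take_last(&bin)->name); /* b */
  return 0;
}
```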
<details>
<summary>_int_malloc small bin</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6
/*
If a small request, check regular bin. Since these "smallbins"
hold one size each, no searching within bins is necessary.
(For a large request, we need to wait until unsorted chunks are
processed to find best fit. But for small ones, fits are exact
anyway, so we can check now, which is faster.)
*/
if (in_smallbin_range (nb))
{
idx = smallbin_index (nb);
bin = bin_at (av, idx);
if ((victim = last (bin)) != bin)
{
bck = victim->bk;
if (__glibc_unlikely (bck->fd != victim))
malloc_printerr ("malloc(): smallbin double linked list corrupted");
set_inuse_bit_at_offset (victim, nb);
bin->bk = bck;
bck->fd = bin;
if (av != &main_arena)
set_non_main_arena (victim);
check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
/* While we're here, if we see other chunks of the same size,
stash them in the tcache. */
size_t tc_idx = csize2tidx (nb);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
{
mchunkptr tc_victim;
/* While bin not empty and tcache not full, copy chunks over. */
while (tcache->counts[tc_idx] < mp_.tcache_count
&& (tc_victim = last (bin)) != bin)
{
if (tc_victim != 0)
{
bck = tc_victim->bk;
set_inuse_bit_at_offset (tc_victim, nb);
if (av != &main_arena)
set_non_main_arena (tc_victim);
bin->bk = bck;
bck->fd = bin;
tcache_put (tc_victim, tc_idx);
}
}
}
#endif
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}
}
```

</details>
When the request does not fit a small bin, the fast bins are consolidated first; the comment in the source explains why:

```c
  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that it
     is called frequently in otherwise tend to fragment.
   */
```
Next, `_int_malloc` processes the unsorted bin:

```c
  /*
     Process recently freed or remaindered chunks, taking one only if
     it is exact fit, or, if this a small request, the chunk is remainder from
     the most recent non-exact fit.  Place other traversed chunks in
     bins.  Note that this step is the only place in any routine where
     chunks are placed in bins.

     The outer loop here is needed because we might not realize until
     near the end of malloc that we should have consolidated, so must
     do so and retry. This happens at most once, and only when we
     would otherwise need to expand memory to service a "small" request.
   */
```
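The comment above summarizes a fairly involved loop. The toy model below (plain C, not glibc code; every name in it is invented for illustration) captures its skeleton: walk the "unsorted" list of freed chunks, return an exact fit immediately, and sort every other traversed chunk into a small or large bucket, giving up after a bounded number of iterations, the same idea as the `MAX_ITERS` cap shown in the next excerpt.

```c
/* Toy skeleton (not glibc) of the unsorted-bin scan described above. */
#include <stdio.h>
#include <stdlib.h>

#define SMALL_LIMIT 1024   /* illustrative small/large boundary only */
#define MAX_ITERS   10000  /* same idea as the cap in _int_malloc */

struct toy_chunk { size_t size; struct toy_chunk *next; };

/* Pop chunks from the unsorted list: return an exact fit, otherwise file the
   chunk into the small or large list so later requests find it in the right bin. */
static struct toy_chunk *scan_unsorted(struct toy_chunk **unsorted,
                                       struct toy_chunk **small_bin,
                                       struct toy_chunk **large_bin,
                                       size_t request)
{
  int iters = 0;
  while (*unsorted != NULL && ++iters < MAX_ITERS)
    {
      struct toy_chunk *victim = *unsorted;
      *unsorted = victim->next;

      if (victim->size == request)      /* exact fit: take it immediately */
        return victim;

      /* This is the only place where traversed chunks get sorted into bins. */
      struct toy_chunk **bin = victim->size < SMALL_LIMIT ? small_bin : large_bin;
      victim->next = *bin;
      *bin = victim;
    }
  return NULL;                          /* caller falls through to other bins */
}

int main(void)
{
  struct toy_chunk c3 = { 2048, NULL };
  struct toy_chunk c2 = { 64, &c3 };
  struct toy_chunk c1 = { 128, &c2 };
  struct toy_chunk *unsorted = &c1, *small_bin = NULL, *large_bin = NULL;

  struct toy_chunk *hit = scan_unsorted(&unsorted, &small_bin, &large_bin, 64);
  printf("exact fit: %zu\n", hit ? hit->size : 0);                  /* 64  */
  printf("small bin head: %zu\n", small_bin ? small_bin->size : 0); /* 128 */
  printf("large bin head: %zu\n", large_bin ? large_bin->size : 0); /* 0: 2048 never traversed */
  return 0;
}
```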
Back in the real code, the unsorted-bin loop also has a couple of tcache-related early exits and an iteration cap:

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4227C1-L4250C7
#if USE_TCACHE
          /* If we've processed as many chunks as we're allowed while
             filling the cache, return one of the cached ones.  */
          ++tcache_unsorted_count;
          if (return_cached
              && mp_.tcache_unsorted_limit > 0
              && tcache_unsorted_count > mp_.tcache_unsorted_limit)
            {
              return tcache_get (tc_idx);
            }
#endif

#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }

#if USE_TCACHE
      /* If all the small chunks we found ended up cached, return one now.  */
      if (return_cached)
        {
          return tcache_get (tc_idx);
        }
#endif
```
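Both of these early returns hand out chunks from the tcache. A quick way to see that behaviour from user space is the small experiment below: on a typical modern glibc build with tcache enabled, same-sized chunks that were just freed are handed back again, most recently freed first (the exact behaviour depends on the glibc version and on tunables such as `glibc.malloc.tcache_count`).

```c
/* Observe tcache reuse: free a few same-sized chunks, then allocate again.
   With tcache enabled, the new allocations typically come back from the
   per-thread cache, most recently freed chunk first (LIFO). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *a = malloc(0x40);
  void *b = malloc(0x40);
  void *c = malloc(0x40);
  printf("allocated:   %p %p %p\n", a, b, c);

  free(a);
  free(b);
  free(c);                       /* the 0x40 tcache bin now holds c, b, a */

  void *x = malloc(0x40);        /* typically reuses c first */
  void *y = malloc(0x40);        /* then b */
  void *z = malloc(0x40);        /* then a */
  printf("reallocated: %p %p %p\n", x, y, z);

  free(x); free(y); free(z);
  return 0;
}
```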