/*
* Authors:
* + Andrea Fioraldi <[email protected]>
* + Pietro Borrello <[email protected]>
*
* License: BSD 2-Clause
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#define SBRK_INCR 4096
#define NBINS 0x80
#define NFASTBINS 7
#define MINSIZE 0x20
#define MAX_FAST 0x80
#define FASTBIN_CONSOLIDATION_THRESHOLD 1024
#define SIZE_SZ sizeof(size_t)
#define IS_MMAPPED 0x2
#define PREV_INUSE 0x1
/* Offset of a field, e.g. offsetof(struct malloc_chunk, size) == SIZE_SZ */
#define offsetof(type, field) ((unsigned long)&(((type*)(0))->field))
/* Given a chunk get the memory that must be returned by malloc.
Same as (void*)&chunk->fd */
#define chunk2mem(c) ((void*)((char*)(c) + 2 * SIZE_SZ))
/* Given a pointer returned by malloc get the associated chunk */
#define mem2chunk(mem) ((struct malloc_chunk*)((char*)(mem)-2 * SIZE_SZ))
/* Get the size from a chunk without the last 3 bits that are flags */
#define chunksize(c) ((c)->size - ((c)->size & 0x7))
/* A chunk size must be aligned to 2*SIZE_SZ and at least MINSIZE */
#define request2size(req) \
(((req) + SIZE_SZ < MINSIZE) \
? MINSIZE \
: (((req) + SIZE_SZ + (2 * SIZE_SZ) - 1) & -(2 * SIZE_SZ)))
/* Get the index of the bin associated with a chunk size (e.g. size2bin(48) == 1) */
#define size2bin(s) (((s) >> 4) - 2)
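/* Worked examples, assuming a 64-bit build (SIZE_SZ == 8):
   request2size(0)   == 0x20  (below MINSIZE, clamped)
   request2size(24)  == 0x20  (24 + SIZE_SZ rounded up to a multiple of 2*SIZE_SZ)
   request2size(25)  == 0x30
   request2size(120) == 0x80  (the largest fastbin size)
   size2bin(0x20) == 0, size2bin(0x30) == 1, size2bin(0x80) == 6 */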
/* Read the chunk size and navigate to the next chunk in memory */
#define next_by_mem(c) ( (struct malloc_chunk*)((char*)(c) + chunksize(c)) )
/* Navigate to the next chunk in memory given the offset */
#define next_by_off(c, off) ( (struct malloc_chunk*)((char*)(c) + (off)) )
/* Read the chunk prev_size and navigate to the previous chunk in memory */
#define prev_by_mem(c) ( (struct malloc_chunk*)((char*)(c) - (c)->prev_size) )
/* Set and clear flag bits */
#define unset_prev_inuse(c) ((c)->size &= ~(size_t)PREV_INUSE)
#define set_prev_inuse(c) ((c)->size |= PREV_INUSE)
#define set_mmapped(c) ((c)->size |= IS_MMAPPED)
/* Get flags bits */
#define is_mmapped(c) ((c)->size & IS_MMAPPED)
#define prev_inuse(c) ((c)->size & PREV_INUSE)
/* arena.bins are truncated malloc_chunk structure, use this macro to access a
bin instead of arena.bins[something] */
#define bin_at(i) \
((struct malloc_chunk*)((char*)&arena.bins[i * 2] - \
offsetof(struct malloc_chunk, fd)))
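/* Example, assuming 64-bit (so offsetof(struct malloc_chunk, fd) == 0x10):
   bin_at(i) == (char*)&arena.bins[2*i] - 0x10, hence bin_at(i)->fd aliases
   arena.bins[2*i] and bin_at(i)->bk aliases arena.bins[2*i + 1], while the
   unused prev_size/size fields overlap the fd/bk slots of bin i-1. */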
/* Take a chunk off a bin list */
#define unlink(P, BK, FD) \
do { \
FD = P->fd; \
BK = P->bk; \
FD->bk = BK; \
BK->fd = FD; \
} while (0)
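/* Example: unlink(victim, bck, fwd) expands to
     fwd = victim->fd; bck = victim->bk;
     fwd->bk = bck;    bck->fd = fwd;
   which splices victim out of its doubly linked bin list. Note that there
   are no safe-unlinking integrity checks here, unlike modern ptmalloc. */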
#ifdef DEBUG
#define debug(...) fprintf(stderr, __VA_ARGS__)
#else
#define debug(...) do {} while(0)
#endif
#define error(m) \
do { \
fprintf(stderr, "error: %s\n", m); \
abort(); \
} while (0)
/* A chunk in memory is like:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`head:' | Size of chunk, in bytes |0|M|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Forward pointer to next chunk in list [U] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Back pointer to previous chunk in list [U] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Space [U] .
. .
. .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes [U] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of next chunk, in bytes |0|M|P|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Last 3 bits of size are flags:
- last bit (P): Previous chunk in memory is in use
- 2nd last bit (M): Chunk is mmapped
- 3rd last bit: Not used here, but ptmalloc uses it (NON_MAIN_ARENA)
When allocated, all the parts marked with [U] are user data inserted in the
returned memory by malloc(). They are metadata only when the chunk is free.
*/
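/* Example: a size field of 0x91 describes a 0x90-byte chunk (chunksize()
   masks the flags) whose previous neighbour in memory is allocated
   (prev_inuse() != 0) and that was not served by mmap (is_mmapped() == 0). */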
struct malloc_chunk {
size_t prev_size;
size_t size;
struct malloc_chunk* fd;
struct malloc_chunk* bk;
};
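/* The code below assumes this header is exactly four words. A minimal
   compile-time sanity check (assuming a C11 compiler and pointers as wide
   as size_t): */
_Static_assert(sizeof(struct malloc_chunk) == 4 * SIZE_SZ,
               "malloc_chunk must be prev_size/size/fd/bk, one word each");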
struct malloc_state {
/* Fastbins are NULL-terminated singly linked lists.
size2bin gives us the index (same as for bins).
There are 7 fastbins in trxmalloc and they are mapped in this way:
- malloc(0..24) will have size = 32 -> fastbin 0
- malloc(25..40) will have size = 48 -> fastbin 1
- malloc(41..56) will have size = 64 -> fastbin 2
- malloc(57..72) will have size = 80 -> fastbin 3
- malloc(73..88) will have size = 96 -> fastbin 4
- malloc(89..104) will have size = 112 -> fastbin 5
- malloc(105..120) will have size = 128 -> fastbin 6 */
struct malloc_chunk* fastbins[NFASTBINS];
struct malloc_chunk* top;
/* In these bins a new free chunk is inserted at the front and requests are
served from the back, in a FIFO spirit.
Each bin is treated as a chunk, but only fd and bk are really used, so size
and prev_size are mapped onto the space of the previous bin (yeah, a bit
messy, but ptmalloc works in this way).
bin N-1 -> +-+-+-+-+-+-+-+-+-+-+-+-+-+
| N-1 fd (N prev_size) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+
| N-1 bk (N size) |
bin N -> +-+-+-+-+-+-+-+-+-+-+-+-+-+
| N fd |
+-+-+-+-+-+-+-+-+-+-+-+-+-+
| N bk |
+-+-+-+-+-+-+-+-+-+-+-+-+-+
When a bin is empty, fd and bk point to the start of the bin interpreted
as a chunk. So, if bin N is empty, its fd and bk hold the address of
bin N-1 (look at the initialization code in trx_malloc() and the bin_at
macro). */
struct malloc_chunk* bins[NBINS * 2 - 2];
};
/* Thread safety? What is thread safety? TODO */
struct malloc_state arena;
/* Malloc hooks are function pointers that replace malloc/realloc/free
when they are != NULL (and this is pornographic for an attacker ;) ) */
void *(*__trx_malloc_hook)(size_t);
void *(*__trx_realloc_hook)(void*, size_t);
void (*__trx_free_hook)(void*);
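/* A caller-side sketch (counting_malloc is a hypothetical function, not part
   of this allocator): a hook can wrap trx_malloc as long as it clears itself
   first to avoid infinite recursion.

     static size_t n_allocs;
     static void* counting_malloc(size_t n) {
       __trx_malloc_hook = NULL;            // disarm to avoid re-entering
       void* p = trx_malloc(n);
       n_allocs++;
       __trx_malloc_hook = counting_malloc; // re-arm
       return p;
     }

     __trx_malloc_hook = counting_malloc;   // install
*/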
/* Utility function to serve mmapped chunks */
static void* _trx_mmap_chunk(size_t size) {
size_t sz = request2size(size) + SIZE_SZ;
struct malloc_chunk *victim =
mmap(0, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (victim == (struct malloc_chunk*)MAP_FAILED)
return NULL;
victim->size = sz;
set_mmapped(victim);
debug(" = %p\n", chunk2mem(victim));
return chunk2mem(victim);
}
void* trx_malloc(size_t size) {
debug("malloc(%zu)", size);
if (__trx_malloc_hook)
return __trx_malloc_hook(size);
size_t sz = request2size(size);
size_t idx = size2bin(sz);
struct malloc_chunk* victim;
if (sz > NBINS * 0x10)
return _trx_mmap_chunk(size);
if (!arena.top) {
/* Initialize heap.
sbrk(0) returns the current heap end. */
void* initial = sbrk(0);
if (initial == (void*)-1)
error("trx_malloc(): sbrk() failed, very very bad");
if (sbrk(SBRK_INCR) == (void*)-1)
error("trx_malloc(): sbrk() failed, very very bad");
/* Initialize arena.bins. Bins are doubly linked.
As a simplification (from ptmalloc) bins act like chunks, but in order to
save space only fd and bk are used.
When empty, bin->fd = bin->bk = bin */
size_t i;
for (i = 0; i < NBINS; ++i)
bin_at(i)->fd = bin_at(i)->bk = bin_at(i);
struct malloc_chunk* new_top = initial;
new_top->size = (SBRK_INCR - SIZE_SZ) | PREV_INUSE;
arena.top = new_top;
goto allocate_from_top;
}
/* From a fastbin if available */
if (sz <= MAX_FAST) {
victim = arena.fastbins[idx];
if (victim) {
debug("removing %p from fastbin %zu\n", victim, idx);
set_prev_inuse(next_by_off(victim, sz));
arena.fastbins[idx] = victim->fd;
debug(" = %p\n", chunk2mem(victim));
return chunk2mem(victim);
}
}
/* From the bins if a chunk of the exact size is available */
if ((victim = bin_at(idx)->bk) != bin_at(idx)) {
struct malloc_chunk* bck = victim->bk;
set_prev_inuse(next_by_off(victim, sz));
bin_at(idx)->bk = bck;
bck->fd = bin_at(idx);
debug(" = %p\n", chunk2mem(victim));
return chunk2mem(victim);
}
/* From the bins if a chunk of greater size is available */
idx += 1;
while (idx < NBINS - 1) {
if ((victim = bin_at(idx)->bk) != bin_at(idx)) {
/* TODO decrease size of this chunk when the requested size is less than
bin size and create a remainder free chunk that must be inserted in
the correct bin.
e.g. with sz = 0x20 and chunksize(victim) = 0x100
set victim size to 0x20, create a remainder with size 0x80
and insert into bin_at(size2bin(0x80)) */
struct malloc_chunk* bck = victim->bk;
set_prev_inuse(next_by_mem(victim));
bin_at(idx)->bk = bck;
bck->fd = bin_at(idx);
debug(" = %p\n", chunk2mem(victim));
return chunk2mem(victim);
}
idx += 1;
}
/* Allocate from top chunk */
allocate_from_top:
victim = arena.top;
/* Compare without the flag bits and require room for the new top chunk */
if (chunksize(victim) >= sz + MINSIZE) {
size_t old_sz = chunksize(victim);
victim->size = sz | PREV_INUSE;
struct malloc_chunk* new_top = next_by_off(victim, sz);
new_top->size = (old_sz - sz) | PREV_INUSE;
arena.top = new_top;
debug(" = %p\n", chunk2mem(victim));
return chunk2mem(victim);
}
/* Request memory to the kernel */
if (sbrk(SBRK_INCR) == (void*)-1)
error("trx_malloc(): sbrk() failed, very very bad");
arena.top->size += SBRK_INCR;
/* Retry with more memory from the OS */
return trx_malloc(size);
}
/* Tears down chunks in fastbins */
void _trx_malloc_consolidate() {
/* TODO: consolidate bulk-merges fastbins into larger chunks.
Traverse the fastbins, check whether chunks are contiguous in memory, then
merge them and insert the new chunk into the proper bin.
Remember that the top chunk cannot have a free chunk as its previous chunk */
return;
}
void trx_free(void* ptr) {
debug("free(%p)\n", ptr);
if (__trx_free_hook) {
__trx_free_hook(ptr);
return;
}
/* free(0) is a no-op to be POSIX compliant */
if (!ptr)
return;
struct malloc_chunk* ck = mem2chunk(ptr);
size_t sz = chunksize(ck);
if (is_mmapped(ck)) {
munmap(ck, sz);
return;
}
size_t idx = size2bin(sz);
if (sz <= MAX_FAST) {
/* Unset prev_inuse of the next chunk if it is not the top chunk,
otherwise consolidate with the top chunk. Top chunk prev_inuse is
always set in this way */
if (next_by_off(ck, sz) != arena.top) {
unset_prev_inuse(next_by_off(ck, sz));
/* Insert at the top of the fastbin and set fd to the old top */
struct malloc_chunk* old_p = arena.fastbins[idx];
arena.fastbins[idx] = ck;
ck->fd = old_p;
next_by_off(ck, sz)->prev_size = sz;
} else {
/* Update arena.top. Note that prev_inuse is already set in arena.top->size */
debug("consolidating %p with top\n", ptr);
ck->size = chunksize(ck) + arena.top->size;
arena.top = ck;
}
} else {
/* When freeing a large chunk (well, not very large in this allocator)
consolidate the possibly surrounding fast chunks.
if(sz >= FASTBIN_CONSOLIDATION_THRESHOLD)
_trx_malloc_consolidate(); */
struct malloc_chunk* p;
struct malloc_chunk *fd, *bk;
/* Consolidate backward */
while (!prev_inuse(ck)) {
p = prev_by_mem(ck);
/* Fast chunks are consolidated in bulk in malloc_consolidate() */
if (chunksize(p) <= MAX_FAST)
break;
unlink(p, fd, bk);
p->size += chunksize(ck);
ck = p;
}
p = next_by_mem(ck);
/* Consolidate forward */
while (p != arena.top && chunksize(p) > MAX_FAST &&
!prev_inuse(next_by_mem(p))) {
unlink(p, fd, bk);
ck->size += chunksize(p);
p = next_by_mem(p);
}
/* Consolidate with top chunk */
if (p == arena.top) {
ck->size = chunksize(ck) + arena.top->size;
arena.top = ck;
return;
}
unset_prev_inuse(p);
sz = chunksize(ck);
idx = size2bin(sz);
fd = bin_at(idx)->fd;
bin_at(idx)->fd = ck;
ck->bk = bin_at(idx);
ck->fd = fd;
fd->bk = ck;
p->prev_size = sz;
}
return;
}
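/* Worked example of the slow path above: freeing a 0xd0-byte chunk whose
   previous neighbour in memory is a free 0x100-byte chunk unlinks that
   neighbour, grows it to 0x1d0 bytes and, if the forward neighbour is
   allocated and not the top chunk, pushes the merged chunk onto the front
   of bin_at(size2bin(0x1d0)). */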
void* trx_calloc(size_t nmemb, size_t size) {
/* Ignurant implementation, maybe TODO, maybe left ignurant */
size_t s = nmemb * size;
void* p = trx_malloc(s);
if (!p)
return NULL;
/* mmap() returns zeroed memory, do not steal the work from the kernel and
avoid filling with zeros again */
if (!is_mmapped(mem2chunk(p)))
memset(p, 0, s);
return p;
}
size_t trx_malloc_usable_size(void* ptr) {
struct malloc_chunk* c = mem2chunk(ptr);
return chunksize(c) - SIZE_SZ;
}
void* trx_realloc(void* ptr, size_t size) {
if (__trx_realloc_hook)
return __trx_realloc_hook(ptr, size);
/* TODO: reallocate the chunk only when the new requested size does not fit
in trx_malloc_usable_size(ptr), otherwise return ptr as is. */
/* realloc(NULL, size) falls back to malloc(size) to be POSIX compliant */
if (ptr == NULL)
return trx_malloc(size);
/* Very very dirty */
void* p = trx_malloc(size);
if (!p)
return NULL;
/* Copy only what fits into the new allocation */
size_t old_sz = trx_malloc_usable_size(ptr);
memcpy(p, ptr, size < old_sz ? size : old_sz);
trx_free(ptr);
return p;
}
/* TODO posix_memalign function is missing :( */
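/* A minimal usage sketch (hypothetical driver, not part of the allocator):
   compile with -DTRX_DEMO to exercise the fastbin, bin and mmap paths. */
#ifdef TRX_DEMO
int main(void) {
  void* a = trx_malloc(24);      /* fastbin-sized, carved from the top chunk */
  void* b = trx_malloc(200);     /* bin-sized */
  void* c = trx_malloc(0x10000); /* > NBINS * 0x10 bytes: served via mmap() */
  trx_free(a);                   /* a's chunk goes to fastbin 0 */
  trx_free(c);                   /* munmap()ed immediately */
  a = trx_malloc(24);            /* pops the chunk just freed (LIFO) */
  trx_free(a);
  trx_free(b);                   /* consolidates back into the top chunk */
  return 0;
}
#endif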