From 70101a3e5be86c0ddb2f86e4881c36dd1acc3ac1 Mon Sep 17 00:00:00 2001
From: Thomas HUET
Date: Tue, 17 Jan 2017 00:13:23 +0100
Subject: [PATCH] 2.36b

---
 afl-fuzz.c                       |  7 +--
 afl-gcc.c                        |  1 -
 afl-showmap.c                    | 75 ++++++++++++++++++++++++--------
 afl-tmin.c                       | 60 ++++++++++++++++++++++++-
 afl-whatsup                      |  2 +-
 config.h                         |  2 +-
 docs/ChangeLog                   | 28 ++++++++++++
 docs/README                      |  2 +
 docs/perf_tips.txt               |  6 +++
 libdislocator/libdislocator.so.c |  4 +-
 llvm_mode/afl-llvm-pass.so.cc    | 10 +++--
 qemu_mode/README.qemu            |  5 +++
 12 files changed, 170 insertions(+), 32 deletions(-)

diff --git a/afl-fuzz.c b/afl-fuzz.c
index 663d3a62..e730cb29 100644
--- a/afl-fuzz.c
+++ b/afl-fuzz.c
@@ -7317,8 +7317,9 @@ static void get_core_count(void) {
 
 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
 
-  OKF("You have %u CPU cores and %u runnable tasks (utilization: %0.0f%%).",
-      cpu_core_count, cur_runnable, cur_runnable * 100.0 / cpu_core_count);
+  OKF("You have %u CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
+      cpu_core_count, cpu_core_count > 1 ? "s" : "",
+      cur_runnable, cur_runnable * 100.0 / cpu_core_count);
 
   if (cpu_core_count > 1) {
 
@@ -7682,7 +7683,7 @@ int main(int argc, char** argv) {
       case 'S':
 
         if (sync_id) FATAL("Multiple -S or -M options not supported");
-        sync_id = optarg;
+        sync_id = ck_strdup(optarg);
        break;
 
      case 'f': /* target file */
diff --git a/afl-gcc.c b/afl-gcc.c
index e50b7fd6..fa3dec1a 100644
--- a/afl-gcc.c
+++ b/afl-gcc.c
@@ -323,7 +323,6 @@ int main(int argc, char** argv) {
 
   }
 
-
   find_as(argv[0]);
 
   edit_params(argc, argv);
diff --git a/afl-showmap.c b/afl-showmap.c
index bb96002e..4b81862a 100644
--- a/afl-showmap.c
+++ b/afl-showmap.c
@@ -63,7 +63,8 @@ static s32 shm_id;                    /* ID of the SHM region              */
 
 static u8  quiet_mode,                /* Hide non-essential messages?      */
            edges_only,                /* Ignore hit counts?                */
-           cmin_mode;                 /* Generate output in afl-cmin mode? */
+           cmin_mode,                 /* Generate output in afl-cmin mode? */
+           binary_mode;               /* Write output as a binary map      */
 
 static volatile u8
            stop_soon,                 /* Ctrl-C pressed?                   */
@@ -73,7 +74,7 @@
 /* Classify tuple counts. Instead of mapping to individual bits, as in
    afl-fuzz.c, we map to more user-friendly numbers between 1 and 8. */
 
-static const u8 count_class_lookup[256] = {
+static const u8 count_class_human[256] = {
 
   [0]           = 0,
   [1]           = 1,
@@ -87,7 +88,21 @@
 
 };
 
-static void classify_counts(u8* mem) {
+static const u8 count_class_binary[256] = {
+
+  [0]           = 0,
+  [1]           = 1,
+  [2]           = 2,
+  [3]           = 4,
+  [4 ... 7]     = 8,
+  [8 ... 15]    = 16,
+  [16 ... 31]   = 32,
+  [32 ... 127]  = 64,
+  [128 ... 255] = 128
+
+};
+
+static void classify_counts(u8* mem, const u8* map) {
 
   u32 i = MAP_SIZE;
 
@@ -101,7 +116,7 @@
   } else {
 
     while (i--) {
-      *mem = count_class_lookup[*mem];
+      *mem = map[*mem];
       mem++;
     }
 
@@ -148,8 +163,8 @@ static void setup_shm(void) {
 
 static u32 write_results(void) {
 
   s32 fd;
-  FILE* f;
   u32 i, ret = 0;
+
   u8  cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
       caa = !!getenv("AFL_CMIN_ALLOW_ANY");
 
@@ -171,27 +186,40 @@ static u32 write_results(void) {
 
   }
 
-  f = fdopen(fd, "w");
-  if (!f) PFATAL("fdopen() failed");
+  if (binary_mode) {
 
-  for (i = 0; i < MAP_SIZE; i++) {
+    for (i = 0; i < MAP_SIZE; i++)
+      if (trace_bits[i]) ret++;
+
+    ck_write(fd, trace_bits, MAP_SIZE, out_file);
+    close(fd);
 
-    if (!trace_bits[i]) continue;
-    ret++;
+  } else {
 
-    if (cmin_mode) {
+    FILE* f = fdopen(fd, "w");
 
-      if (child_timed_out) break;
-      if (!caa && child_crashed != cco) break;
+    if (!f) PFATAL("fdopen() failed");
 
-      fprintf(f, "%u%u\n", trace_bits[i], i);
+    for (i = 0; i < MAP_SIZE; i++) {
 
-    } else fprintf(f, "%06u:%u\n", i, trace_bits[i]);
+      if (!trace_bits[i]) continue;
+      ret++;
 
-  }
+      if (cmin_mode) {
+
+        if (child_timed_out) break;
+        if (!caa && child_crashed != cco) break;
+
+        fprintf(f, "%u%u\n", trace_bits[i], i);
+
+      } else fprintf(f, "%06u:%u\n", i, trace_bits[i]);
+
+    }
 
-  fclose(f);
+    fclose(f);
+
+  }
 
   return ret;
 
@@ -293,7 +321,8 @@ static void run_target(char** argv) {
 
   if (*(u32*)trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute '%s'", argv[0]);
 
-  classify_counts(trace_bits);
+  classify_counts(trace_bits, binary_mode ?
+                  count_class_binary : count_class_human);
 
   if (!quiet_mode)
     SAYF(cRST "-- Program output ends --\n");
 
@@ -585,7 +614,7 @@ int main(int argc, char** argv) {
 
   doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
 
-  while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQ")) > 0)
+  while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQb")) > 0)
 
     switch (opt) {
 
@@ -682,6 +711,14 @@ int main(int argc, char** argv) {
         qemu_mode = 1;
         break;
 
+      case 'b':
+
+        /* Secret undocumented mode. Writes output in raw binary format
+           similar to that dumped by afl-fuzz in <out_dir>/fuzz_bitmap. */
+
+        binary_mode = 1;
+        break;
+
       default:
 
         usage(argv[0]);
diff --git a/afl-tmin.c b/afl-tmin.c
--- a/afl-tmin.c
+++ b/afl-tmin.c
+/* Apply mask to classified bitmap (if set). */
+
+static void apply_mask(u32* mem, u32* mask) {
+
+  u32 i = (MAP_SIZE >> 2);
+
+  if (!mask) return;
+
+  while (i--) {
+
+    *mem &= ~*mask;
+    mem++;
+    mask++;
+
+  }
+
+}
+
+
 /* See if any bytes are set in the bitmap. */
 
 static inline u8 anything_set(void) {
 
@@ -314,6 +334,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
     FATAL("Unable to execute '%s'", argv[0]);
 
   classify_counts(trace_bits);
+  apply_mask((u32*)trace_bits, (u32*)mask_bitmap);
   total_execs++;
 
   if (stop_soon) {
 
@@ -919,6 +940,22 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
 }
 
+/* Read mask bitmap from file. This is for the -B option. */
+
+static void read_bitmap(u8* fname) {
+
+  s32 fd = open(fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", fname);
+
+  ck_read(fd, mask_bitmap, MAP_SIZE, fname);
+
+  close(fd);
+
+}
+
+
 /* Main entry point */
 
 int main(int argc, char** argv) {
 
@@ -931,7 +968,7 @@ int main(int argc, char** argv) {
 
   SAYF(cCYA "afl-tmin " cBRI VERSION cRST " by <lcamtuf@google.com>\n");
 
-  while ((opt = getopt(argc,argv,"+i:o:f:m:t:xeQ")) > 0)
+  while ((opt = getopt(argc,argv,"+i:o:f:m:t:B:xeQ")) > 0)
 
     switch (opt) {
 
@@ -1023,6 +1060,25 @@ int main(int argc, char** argv) {
         qemu_mode = 1;
         break;
 
+      case 'B': /* load bitmap */
+
+        /* This is a secret undocumented option! It is speculated to be useful
+           if you have a baseline "boring" input file and another "interesting"
+           file you want to minimize.
+
+           You can dump a binary bitmap for the boring file using
+           afl-showmap -b, and then load it into afl-tmin via -B. The minimizer
+           will then minimize to preserve only the edges that are unique to
+           the interesting input file, while ignoring everything that is
+           already present in the baseline map.
+
+           The option may be extended and made more official if it proves
+           to be useful. */
+
+        mask_bitmap = ck_alloc(MAP_SIZE);
+        read_bitmap(optarg);
+        break;
+
       default:
 
         usage(argv[0]);
diff --git a/afl-whatsup b/afl-whatsup
index 9a186f96..a4d30418 100755
--- a/afl-whatsup
+++ b/afl-whatsup
@@ -74,7 +74,7 @@ if [ "$SUMMARY_ONLY" = "" ]; then
 
 fi
 
-for i in `find . -maxdepth 2 -iname fuzzer_stats`; do
+for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do
 
   sed 's/^command_line.*$/_skip:1/;s/[ ]*:[ ]*/="/;s/$/"/' "$i" >"$TMP"
   . "$TMP"
diff --git a/config.h b/config.h
index 133a79e5..a5880b68 100644
--- a/config.h
+++ b/config.h
@@ -21,7 +21,7 @@
 
 /* Version string: */
 
-#define VERSION             "2.35b"
+#define VERSION             "2.36b"
 
 /******************************************************
  *                                                     *
diff --git a/docs/ChangeLog b/docs/ChangeLog
index 9ff07584..fa776bc2 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -16,6 +16,34 @@ Not sure if you should upgrade? The lowest currently recommended version
 is 2.31b. If you're stuck on an earlier release, it's strongly advisable to
 get on with the times.
 
+--------------
+Version 2.36b:
+--------------
+
+  - Fixed a cosmetic bad free() bug when aborting -S sessions. Spotted
+    by Johannes S.
+
+  - Made a small change to afl-whatsup to sort fuzzers by name.
+
+  - Fixed a minor issue with malloc(0) in libdislocator. Spotted by
+    Rene Freingruber.
+
+  - Changed the clobber pattern in libdislocator to a slightly more
+    reliable one. Suggested by Rene Freingruber.
+
+  - Added a note about THP performance. Suggested by Sergey Davidoff.
+
+  - Added somewhat unofficial support for running afl-tmin with a
+    baseline "mask" that causes it to minimize only for edges that
+    are unique to the input file, but not to the "boring" baseline.
+    Suggested by Sami Liedes.
+
+  - "Fixed" a getPassName() problem with newer versions of clang.
+    Reported by Craig Young and several other folks.
+
+    Yep, I know I have a backlog on several other feature requests.
+    Stay tuned!
+
 --------------
 Version 2.35b:
 --------------
diff --git a/docs/README b/docs/README
index a937fa6f..501bdd43 100644
--- a/docs/README
+++ b/docs/README
@@ -479,6 +479,8 @@ bug reports, or patches from:
   Kurt Roeckx                           Marcel Bohme
   Van-Thuan Pham                        Abhik Roychoudhury
   Joshua J. Drake                       Toby Hutton
+  Rene Freingruber                      Sergey Davidoff
+  Sami Liedes                           Craig Young
 
 Thank you!
diff --git a/docs/perf_tips.txt b/docs/perf_tips.txt
index 0821b029..3a8997a2 100644
--- a/docs/perf_tips.txt
+++ b/docs/perf_tips.txt
@@ -167,6 +167,12 @@ There are several OS-level factors that may affect fuzzing speed:
     On other systems, the impact of CPU scaling will be different; when fuzzing,
     use OS-specific tools to find out if all cores are running at full speed.
 
+  - Transparent huge pages. Some allocators, such as jemalloc, can incur a
+    heavy fuzzing penalty when transparent huge pages (THP) are enabled in the
+    kernel. You can disable this via:
+
+    echo never > /sys/kernel/mm/transparent_hugepage/enabled
+
   - Suboptimal scheduling strategies. The significance of this will vary from
     one target to another, but on Linux, you may want to make sure that the
     following options are set:
diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c
index 8a3e28c7..0dfc98e6 100644
--- a/libdislocator/libdislocator.so.c
+++ b/libdislocator/libdislocator.so.c
@@ -64,7 +64,7 @@
 /* Canary & clobber bytes: */
 
 #define ALLOC_CANARY  0xAACCAACC
-#define ALLOC_CLOBBER 0x41
+#define ALLOC_CLOBBER 0xCC
 
 #define PTR_C(_p) (((u32*)(_p))[-1])
 #define PTR_L(_p) (((u32*)(_p))[-2])
@@ -90,7 +90,7 @@ static void* __dislocator_alloc(size_t len) {
 
   void* ret;
 
-  if (total_mem + len > max_mem || total_mem + len <= total_mem) {
+  if (total_mem + len > max_mem || total_mem + len < total_mem) {
 
     if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc
index 422cbad6..ad040e88 100644
--- a/llvm_mode/afl-llvm-pass.so.cc
+++ b/llvm_mode/afl-llvm-pass.so.cc
@@ -49,9 +49,13 @@ namespace {
 
       bool runOnModule(Module &M) override;
 
-      const char *getPassName() const override {
-        return "American Fuzzy Lop Instrumentation";
-      }
+      /* Ugh, the return type changed in recent versions of LLVM
+         (const char* -> StringRef). Commenting out until the situation
+         stabilizes, since we don't strictly need this anyway. */
+
+      // StringRef getPassName() const override {
+      //  return "American Fuzzy Lop Instrumentation";
+      // }
 
   };
diff --git a/qemu_mode/README.qemu b/qemu_mode/README.qemu
index ce4181e4..c27b2557 100644
--- a/qemu_mode/README.qemu
+++ b/qemu_mode/README.qemu
@@ -98,6 +98,11 @@ Compared to fully-fledged virtualization, the user emulation mode is *NOT* a
 security boundary. The binaries can freely interact with the host OS. If you
 somehow need to fuzz an untrusted binary, put everything in a sandbox first.
 
+QEMU does not necessarily support all CPU or hardware features that your
+target program may be utilizing. In particular, it does not appear to have
+full support for AVX2 / FMA3. Using binaries for older CPUs, or recompiling them
+with -march=core2, can help.
+
 Beyond that, this is an early-stage mechanism, so field reports are welcome.
 You can send them to <lcamtuf@google.com>.
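
A quick usage sketch for the two new switches, based on the comments in the
hunks above (the file and target names are made up for illustration):
afl-showmap -b dumps the raw classified bitmap, and afl-tmin -B loads such a
bitmap as a mask, so a baseline-driven minimization could plausibly look like:

  # 1) Capture a binary coverage map for the "boring" baseline input:
  ./afl-showmap -b -o baseline.map -- ./target baseline_input

  # 2) Minimize the interesting case; edges already present in baseline.map
  #    are masked out, so only the edges unique to it are preserved:
  ./afl-tmin -B baseline.map -i interesting_input -o minimized -- ./target @@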