/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#define AFL_MAIN
#define MESSAGES_TO_STDOUT
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define _FILE_OFFSET_BITS 64
#include "config.h"
#include "types.h"
#include "debug.h"
#include "alloc-inl.h"
#include "hash.h"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include <signal.h>
#include <dirent.h>
#include <ctype.h>
#include <fcntl.h>
#include <termios.h>
#include <dlfcn.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
# include <sys/sysctl.h>
#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
/* For systems that have sched_setaffinity; right now just Linux, but one
can hope... */
#ifdef __linux__
# define HAVE_AFFINITY 1
#endif /* __linux__ */
/* A toggle to export some variables when building as a library. Not very
useful for the general public. */
#ifdef AFL_LIB
# define EXP_ST
#else
# define EXP_ST static
#endif /* ^AFL_LIB */
/* Lots of globals, but mostly for the status UI and other things where it
really makes no sense to haul them around as function parameters. */
EXP_ST u8 *in_dir, /* Input directory with test cases */
*out_file, /* File to fuzz, if any */
*out_dir, /* Working & output directory */
*sync_dir, /* Synchronization directory */
*sync_id, /* Fuzzer ID */
*use_banner, /* Display banner */
*in_bitmap, /* Input bitmap */
*doc_path, /* Path to documentation dir */
*target_path, /* Path to target binary */
*orig_cmdline; /* Original command line */
EXP_ST u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */
static u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */
EXP_ST u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */
static u32 stats_update_freq = 1; /* Stats update frequency (execs) */
EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */
force_deterministic, /* Force deterministic stages? */
use_splicing, /* Recombine input files? */
dumb_mode, /* Run in non-instrumented mode? */
score_changed, /* Scoring for favorites changed? */
kill_signal, /* Signal that killed the child */
resuming_fuzz, /* Resuming an older fuzzing job? */
timeout_given, /* Specific timeout given? */
not_on_tty, /* stdout is not a tty */
term_too_small, /* terminal dimensions too small */
uses_asan, /* Target uses ASAN? */
no_forkserver, /* Disable forkserver? */
crash_mode, /* Crash mode! Yeah! */
in_place_resume, /* Attempt in-place resume? */
auto_changed, /* Auto-generated tokens changed? */
no_cpu_meter_red, /* Feng shui on the status screen */
no_arith, /* Skip most arithmetic ops */
shuffle_queue, /* Shuffle input queue? */
bitmap_changed = 1, /* Time to update bitmap? */
qemu_mode, /* Running in QEMU mode? */
skip_requested, /* Skip request, via SIGUSR1 */
run_over10m, /* Run time over 10 minutes? */
persistent_mode, /* Running in persistent mode? */
deferred_mode, /* Deferred forkserver mode? */
fast_cal; /* Try to calibrate faster? */
static s32 out_fd, /* Persistent fd for out_file */
dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */
dev_null_fd = -1, /* Persistent fd for /dev/null */
fsrv_ctl_fd, /* Fork server control pipe (write) */
fsrv_st_fd; /* Fork server status pipe (read) */
static s32 forksrv_pid, /* PID of the fork server */
child_pid = -1, /* PID of the fuzzed program */
out_dir_fd = -1; /* FD of the lock file */
EXP_ST u8* trace_bits; /* SHM with instrumentation bitmap */
EXP_ST u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */
virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */
virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */
static u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */
static s32 shm_id; /* ID of the SHM region */
static volatile u8 stop_soon, /* Ctrl-C pressed? */
clear_screen = 1, /* Window resized? */
child_timed_out; /* Traced process timed out? */
EXP_ST u32 queued_paths, /* Total number of queued testcases */
queued_variable, /* Testcases with variable behavior */
queued_at_start, /* Total number of initial inputs */
queued_discovered, /* Items discovered during this run */
queued_imported, /* Items imported via -S */
queued_favored, /* Paths deemed favorable */
queued_with_cov, /* Paths with new coverage bytes */
pending_not_fuzzed, /* Queued but not done yet */
pending_favored, /* Pending favored paths */
cur_skipped_paths, /* Abandoned inputs in cur cycle */
cur_depth, /* Current path depth */
max_depth, /* Max path depth */
useless_at_start, /* Number of useless starting paths */
var_byte_count, /* Bitmap bytes with var behavior */
current_entry, /* Current queue entry ID */
havoc_div = 1; /* Cycle count divisor for havoc */
EXP_ST u64 total_crashes, /* Total number of crashes */
unique_crashes, /* Crashes with unique signatures */
total_tmouts, /* Total number of timeouts */
unique_tmouts, /* Timeouts with unique signatures */
unique_hangs, /* Hangs with unique signatures */
total_execs, /* Total execve() calls */
start_time, /* Unix start time (ms) */
last_path_time, /* Time for most recent path (ms) */
last_crash_time, /* Time for most recent crash (ms) */
last_hang_time, /* Time for most recent hang (ms) */
last_crash_execs, /* Exec counter at last crash */
queue_cycle, /* Queue round counter */
cycles_wo_finds, /* Cycles without any new paths */
trim_execs, /* Execs done to trim input files */
bytes_trim_in, /* Bytes coming into the trimmer */
bytes_trim_out, /* Bytes coming out of the trimmer */
blocks_eff_total, /* Blocks subject to effector maps */
blocks_eff_select; /* Blocks selected as fuzzable */
static u32 subseq_tmouts; /* Number of timeouts in a row */
static u8 *stage_name = "init", /* Name of the current fuzz stage */
*stage_short, /* Short stage name */
*syncing_party; /* Currently syncing with... */
static s32 stage_cur, stage_max; /* Stage progression */
static s32 splicing_with = -1; /* Splicing with which test case? */
static u32 master_id, master_max; /* Master instance job splitting */
static u32 syncing_case; /* Syncing with case #... */
static s32 stage_cur_byte, /* Byte offset of current stage op */
stage_cur_val; /* Value used for stage op */
static u8 stage_val_type; /* Value type (STAGE_VAL_*) */
static u64 stage_finds[32], /* Patterns found per fuzz stage */
stage_cycles[32]; /* Execs per fuzz stage */
static u32 rand_cnt; /* Random number counter */
static u64 total_cal_us, /* Total calibration time (us) */
total_cal_cycles; /* Total calibration cycles */
static u64 total_bitmap_size, /* Total bit count for all bitmaps */
total_bitmap_entries; /* Number of bitmaps counted */
static s32 cpu_core_count; /* CPU core count */
#ifdef HAVE_AFFINITY
static s32 cpu_aff = -1; /* Selected CPU core */
#endif /* HAVE_AFFINITY */
static FILE* plot_file; /* Gnuplot output file */
struct queue_entry {
u8* fname; /* File name for the test case */
u32 len; /* Input length */
u8 cal_failed, /* Calibration failed? */
trim_done, /* Trimmed? */
was_fuzzed, /* Had any fuzzing done yet? */
passed_det, /* Deterministic stages passed? */
has_new_cov, /* Triggers new coverage? */
var_behavior, /* Variable behavior? */
favored, /* Currently favored? */
fs_redundant; /* Marked as redundant in the fs? */
u32 bitmap_size, /* Number of bits set in bitmap */
exec_cksum; /* Checksum of the execution trace */
u64 exec_us, /* Execution time (us) */
handicap, /* Number of queue cycles behind */
depth; /* Path depth */
u8* trace_mini; /* Trace bytes, if kept */
u32 tc_ref; /* Trace bytes ref count */
struct queue_entry *next, /* Next element, if any */
*next_100; /* 100 elements ahead */
};
static struct queue_entry *queue, /* Fuzzing queue (linked list) */
*queue_cur, /* Current offset within the queue */
*queue_top, /* Top of the list */
*q_prev100; /* Previous 100 marker */
static struct queue_entry*
top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
struct extra_data {
u8* data; /* Dictionary token data */
u32 len; /* Dictionary token length */
u32 hit_cnt; /* Use count in the corpus */
};
static struct extra_data* extras; /* Extra tokens to fuzz with */
static u32 extras_cnt; /* Total number of tokens read */
static struct extra_data* a_extras; /* Automatically selected extras */
static u32 a_extras_cnt; /* Total number of tokens available */
static u8* (*post_handler)(u8* buf, u32* len);
/* Interesting values, as per config.h */
static s8 interesting_8[] = { INTERESTING_8 };
static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
/* Fuzzing stages */
enum {
/* 00 */ STAGE_FLIP1,
/* 01 */ STAGE_FLIP2,
/* 02 */ STAGE_FLIP4,
/* 03 */ STAGE_FLIP8,
/* 04 */ STAGE_FLIP16,
/* 05 */ STAGE_FLIP32,
/* 06 */ STAGE_ARITH8,
/* 07 */ STAGE_ARITH16,
/* 08 */ STAGE_ARITH32,
/* 09 */ STAGE_INTEREST8,
/* 10 */ STAGE_INTEREST16,
/* 11 */ STAGE_INTEREST32,
/* 12 */ STAGE_EXTRAS_UO,
/* 13 */ STAGE_EXTRAS_UI,
/* 14 */ STAGE_EXTRAS_AO,
/* 15 */ STAGE_HAVOC,
/* 16 */ STAGE_SPLICE
};
/* Stage value types */
enum {
/* 00 */ STAGE_VAL_NONE,
/* 01 */ STAGE_VAL_LE,
/* 02 */ STAGE_VAL_BE
};
/* Execution status fault codes */
enum {
/* 00 */ FAULT_NONE,
/* 01 */ FAULT_TMOUT,
/* 02 */ FAULT_CRASH,
/* 03 */ FAULT_ERROR,
/* 04 */ FAULT_NOINST,
/* 05 */ FAULT_NOBITS
};
/* Get unix time in milliseconds */
static u64 get_cur_time(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
}
/* Get unix time in microseconds */
static u64 get_cur_time_us(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
}
/* Generate a random number (from 0 to limit - 1). This may
have slight bias. */
static inline u32 UR(u32 limit) {
if (unlikely(!rand_cnt--)) {
u32 seed[2];
ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
srandom(seed[0]);
rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
}
return random() % limit;
}
/* Shuffle an array of pointers. Might be slightly biased. */
static void shuffle_ptrs(void** ptrs, u32 cnt) {
u32 i;
for (i = 0; i < cnt - 2; i++) {
u32 j = i + UR(cnt - i);
void *s = ptrs[i];
ptrs[i] = ptrs[j];
ptrs[j] = s;
}
}
#ifdef HAVE_AFFINITY
/* Build a list of processes bound to specific cores and bind ourselves to a
free one; bails out if no free core can be found. Assumes an upper bound of
4k CPUs. */
static void bind_to_free_cpu(void) {
DIR* d;
struct dirent* de;
cpu_set_t c;
u8 cpu_used[4096] = { 0 };
u32 i;
if (cpu_core_count < 2) return;
if (getenv("AFL_NO_AFFINITY")) {
WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
return;
}
d = opendir("/proc");
if (!d) {
WARNF("Unable to access /proc - can't scan for free CPU cores.");
return;
}
ACTF("Checking CPU core loadout...");
/* Introduce some jitter, in case multiple AFL tasks are doing the same
thing at the same time... */
usleep(R(1000) * 250);
/* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
Flag all processes bound to a specific CPU using cpu_used[]. This will
fail for some exotic binding setups, but is likely good enough in almost
all real-world use cases. */
while ((de = readdir(d))) {
u8* fn;
FILE* f;
u8 tmp[MAX_LINE];
u8 has_vmsize = 0;
if (!isdigit(de->d_name[0])) continue;
fn = alloc_printf("/proc/%s/status", de->d_name);
if (!(f = fopen(fn, "r"))) {
ck_free(fn);
continue;
}
while (fgets(tmp, MAX_LINE, f)) {
u32 hval;
/* Processes without VmSize are probably kernel tasks. */
if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;
if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) &&
!strchr(tmp, '-') && !strchr(tmp, ',') &&
sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) &&
has_vmsize) {
cpu_used[hval] = 1;
break;
}
}
ck_free(fn);
fclose(f);
}
closedir(d);
for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break;
if (i == cpu_core_count) {
SAYF("\n" cLRD "[-] " cRST
"Uh-oh, looks like all %u CPU cores on your system are allocated to\n"
" other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n"
" another fuzzer on this machine is probably a bad plan, but if you are\n"
" absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
cpu_core_count);
FATAL("No more free CPU cores");
}
OKF("Found a free CPU core, binding to #%u.", i);
cpu_aff = i;
CPU_ZERO(&c);
CPU_SET(i, &c);
if (sched_setaffinity(0, sizeof(c), &c))
PFATAL("sched_setaffinity failed");
}
#endif /* HAVE_AFFINITY */
#ifndef IGNORE_FINDS
/* Helper function to compare buffers; returns first and last differing offset. We
use this to find reasonable locations for splicing two files. */
static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
s32 f_loc = -1;
s32 l_loc = -1;
u32 pos;
for (pos = 0; pos < len; pos++) {
if (*(ptr1++) != *(ptr2++)) {
if (f_loc == -1) f_loc = pos;
l_loc = pos;
}
}
*first = f_loc;
*last = l_loc;
return;
}
#endif /* !IGNORE_FINDS */
/* Describe integer. Uses 12 cyclic static buffers for return values. The value
returned should be five characters or less for all the integers we reasonably
expect to see. */
static u8* DI(u64 val) {
static u8 tmp[12][16];
static u8 cur;
cur = (cur + 1) % 12;
#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
if (val < (_divisor) * (_limit_mult)) { \
sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
return tmp[cur]; \
} \
} while (0)
/* 0-9999 */
CHK_FORMAT(1, 10000, "%llu", u64);
/* 10.0k - 99.9k */
CHK_FORMAT(1000, 99.95, "%0.01fk", double);
/* 100k - 999k */
CHK_FORMAT(1000, 1000, "%lluk", u64);
/* 1.00M - 9.99M */
CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);
/* 10.0M - 99.9M */
CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);
/* 100M - 999M */
CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);
/* 1.00G - 9.99G */
CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);
/* 10.0G - 99.9G */
CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);
/* 100G - 999G */
CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);
/* 1.00T - 9.99T */
CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);
/* 10.0T - 99.9T */
CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);
/* 100T+ */
strcpy(tmp[cur], "infty");
return tmp[cur];
}
/* Describe float. Similar to the above, except with a single
static buffer. */
static u8* DF(double val) {
static u8 tmp[16];
if (val < 99.995) {
sprintf(tmp, "%0.02f", val);
return tmp;
}
if (val < 999.95) {
sprintf(tmp, "%0.01f", val);
return tmp;
}
return DI((u64)val);
}
/* Describe integer as memory size. */
static u8* DMS(u64 val) {
static u8 tmp[12][16];
static u8 cur;
cur = (cur + 1) % 12;
/* 0-9999 */
CHK_FORMAT(1, 10000, "%llu B", u64);
/* 10.0k - 99.9k */
CHK_FORMAT(1024, 99.95, "%0.01f kB", double);
/* 100k - 999k */
CHK_FORMAT(1024, 1000, "%llu kB", u64);
/* 1.00M - 9.99M */
CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);
/* 10.0M - 99.9M */
CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);
/* 100M - 999M */
CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);
/* 1.00G - 9.99G */
CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);
/* 10.0G - 99.9G */
CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);
/* 100G - 999G */
CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);
/* 1.00T - 9.99T */
CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);
/* 10.0T - 99.9T */
CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);
#undef CHK_FORMAT
/* 100T+ */
strcpy(tmp[cur], "infty");
return tmp[cur];
}
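/* Rough worked examples of the CHK_FORMAT thresholds used by DI() and DMS()
   above (derived from the tables, shown here for reference only):

     DI(9999)       -> "9999"    (below the 10k cutoff, printed verbatim)
     DI(12345)      -> "12.3k"   (12345 / 1000.0, one decimal place)
     DI(1234567)    -> "1.23M"   (1234567 / 1000000.0, two decimal places)
     DMS(123456789) -> "117 MB"  (123456789 / (1024 * 1024), integer MB step)

   The 99.95 / 9.995 style cutoffs keep printf rounding from producing strings
   such as "100.0k" or "10.00M" that would blow past the intended width. */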
/* Describe time delta. Returns one static buffer, 34 chars or less. */
static u8* DTD(u64 cur_ms, u64 event_ms) {
static u8 tmp[64];
u64 delta;
s32 t_d, t_h, t_m, t_s;
if (!event_ms) return "none seen yet";
delta = cur_ms - event_ms;
t_d = delta / 1000 / 60 / 60 / 24;
t_h = (delta / 1000 / 60 / 60) % 24;
t_m = (delta / 1000 / 60) % 60;
t_s = (delta / 1000) % 60;
sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s);
return tmp;
}
/* Mark deterministic checks as done for a particular queue entry. We use the
.state file to avoid repeating deterministic fuzzing when resuming aborted
scans. */
static void mark_as_det_done(struct queue_entry* q) {
u8* fn = strrchr(q->fname, '/');
s32 fd;
fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
ck_free(fn);
q->passed_det = 1;
}
/* Mark as variable. Create symlinks if possible to make it easier to examine
the files. */
static void mark_as_variable(struct queue_entry* q) {
u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
ldest = alloc_printf("../../%s", fn);
fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
if (symlink(ldest, fn)) {
s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
}
ck_free(ldest);
ck_free(fn);
q->var_behavior = 1;
}
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
but may be useful for post-processing datasets. */
static void mark_as_redundant(struct queue_entry* q, u8 state) {
u8* fn;
s32 fd;
if (state == q->fs_redundant) return;
q->fs_redundant = state;
fn = strrchr(q->fname, '/');
fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
if (state) {
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
} else {
if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);
}
ck_free(fn);
}
/* Append new test case to the queue. */
static void add_to_queue(u8* fname, u32 len, u8 passed_det) {
struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
q->fname = fname;
q->len = len;
q->depth = cur_depth + 1;
q->passed_det = passed_det;
if (q->depth > max_depth) max_depth = q->depth;
if (queue_top) {
queue_top->next = q;
queue_top = q;
} else q_prev100 = queue = queue_top = q;
queued_paths++;
pending_not_fuzzed++;
cycles_wo_finds = 0;
if (!(queued_paths % 100)) {
q_prev100->next_100 = q;
q_prev100 = q;
}
last_path_time = get_cur_time();
}
/* Destroy the entire queue. */
EXP_ST void destroy_queue(void) {
struct queue_entry *q = queue, *n;
while (q) {
n = q->next;
ck_free(q->fname);
ck_free(q->trace_mini);
ck_free(q);
q = n;
}
}
/* Write bitmap to file. The bitmap is useful mostly for the secret
-B option, to focus a separate fuzzing session on a particular
interesting input without rediscovering all the others. */
EXP_ST void write_bitmap(void) {
u8* fname;
s32 fd;
if (!bitmap_changed) return;
bitmap_changed = 0;
fname = alloc_printf("%s/fuzz_bitmap", out_dir);
fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_write(fd, virgin_bits, MAP_SIZE, fname);
close(fd);
ck_free(fname);
}
/* Read bitmap from file. This is for the -B option again. */
EXP_ST void read_bitmap(u8* fname) {
s32 fd = open(fname, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_read(fd, virgin_bits, MAP_SIZE, fname);
close(fd);
}
/* Check if the current execution path brings anything new to the table.
Update virgin bits to reflect the finds. Returns 1 if the only change is
the hit-count for a particular tuple; 2 if there are new tuples seen.
Updates the map, so subsequent calls will always return 0.
This function is called after every exec() on a fairly large buffer, so
it needs to be fast. We do this in 32-bit and 64-bit flavors. */
static inline u8 has_new_bits(u8* virgin_map) {
#ifdef __x86_64__
u64* current = (u64*)trace_bits;
u64* virgin = (u64*)virgin_map;
u32 i = (MAP_SIZE >> 3);
#else
u32* current = (u32*)trace_bits;
u32* virgin = (u32*)virgin_map;
u32 i = (MAP_SIZE >> 2);
#endif /* ^__x86_64__ */
u8 ret = 0;
while (i--) {
/* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
that have not been already cleared from the virgin map - since this will
almost always be the case. */
if (unlikely(*current) && unlikely(*current & *virgin)) {
if (likely(ret < 2)) {
u8* cur = (u8*)current;
u8* vir = (u8*)virgin;
/* Looks like we have not found any new bytes yet; see if any non-zero
bytes in current[] are pristine in virgin[]. */
#ifdef __x86_64__
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
(cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
(cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
else ret = 1;
#else
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
else ret = 1;
#endif /* ^__x86_64__ */
}
*virgin &= ~*current;
}
current++;
virgin++;
}
if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
return ret;
}
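/* A minimal per-byte sketch of the update performed above (illustrative only,
   never compiled - variable handling is simplified):
*/
#if 0
u8 cur = trace_bits[i];            /* classified hit count for tuple i       */
u8 vir = virgin_map[i];            /* 0xff means tuple i was never seen      */

if (cur & vir) {                   /* this byte contributes something new    */

  if (ret < 2) ret = (vir == 0xff) ? 2 : 1;  /* 2 = new tuple, 1 = new count */

  virgin_map[i] &= ~cur;           /* remember it; a repeat call returns 0   */

}
#endif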
/* Count the number of bits set in the provided bitmap. Used for the status
screen several times every second, does not have to be fast. */
static u32 count_bits(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
/* This gets called on the inverse, virgin bitmap; optimize for sparse
data. */
if (v == 0xffffffff) {
ret += 32;
continue;
}
v -= ((v >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
}
return ret;
}
#define FF(_b) (0xff << ((_b) << 3))
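/* E.g. FF(0) == 0x000000ff and FF(2) == 0x00ff0000, so "v & FF(n)" is non-zero
   iff byte n of the u32 is non-zero, and "(v & FF(n)) != FF(n)" below tests
   whether byte n differs from 0xff. */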
/* Count the number of bytes set in the bitmap. Called fairly sporadically,
mostly to update the status screen or calibrate and examine confirmed
new paths. */
static u32 count_bytes(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
if (!v) continue;
if (v & FF(0)) ret++;
if (v & FF(1)) ret++;
if (v & FF(2)) ret++;
if (v & FF(3)) ret++;
}
return ret;
}
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
status screen, several calls per second or so. */
static u32 count_non_255_bytes(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
/* This is called on the virgin bitmap, so optimize for the most likely
case. */
if (v == 0xffffffff) continue;
if ((v & FF(0)) != FF(0)) ret++;
if ((v & FF(1)) != FF(1)) ret++;
if ((v & FF(2)) != FF(2)) ret++;
if ((v & FF(3)) != FF(3)) ret++;
}
return ret;
}
/* Destructively simplify trace by eliminating hit count information
and replacing it with 0x80 or 0x01 depending on whether the tuple
is hit or not. Called on every new crash or timeout, should be
reasonably fast. */
static const u8 simplify_lookup[256] = {
[0] = 1,
[1 ... 255] = 128
};
#ifdef __x86_64__
static void simplify_trace(u64* mem) {
u32 i = MAP_SIZE >> 3;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
mem8[0] = simplify_lookup[mem8[0]];
mem8[1] = simplify_lookup[mem8[1]];
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
mem8[4] = simplify_lookup[mem8[4]];
mem8[5] = simplify_lookup[mem8[5]];
mem8[6] = simplify_lookup[mem8[6]];
mem8[7] = simplify_lookup[mem8[7]];
} else *mem = 0x0101010101010101ULL;
mem++;
}
}
#else
static void simplify_trace(u32* mem) {
u32 i = MAP_SIZE >> 2;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
mem8[0] = simplify_lookup[mem8[0]];
mem8[1] = simplify_lookup[mem8[1]];
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
} else *mem = 0x01010101;
mem++;
}
}
#endif /* ^__x86_64__ */
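/* In short: after simplify_trace(), every map byte is either 0x01 (tuple not
   hit) or 0x80 (hit at least once) - e.g. a raw count of 7 becomes 0x80 and a
   raw 0 becomes 0x01 - so crash and timeout signatures ignore hit counts. */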
/* Destructively classify execution counts in a trace. This is used as a
preprocessing step for any newly acquired traces. Called on every exec,
must be fast. */
static const u8 count_class_lookup8[256] = {
[0] = 0,
[1] = 1,
[2] = 2,
[3] = 4,
[4 ... 7] = 8,
[8 ... 15] = 16,
[16 ... 31] = 32,
[32 ... 127] = 64,
[128 ... 255] = 128
};
static u16 count_class_lookup16[65536];
EXP_ST void init_count_class16(void) {
u32 b1, b2;
for (b1 = 0; b1 < 256; b1++)
for (b2 = 0; b2 < 256; b2++)
count_class_lookup16[(b1 << 8) + b2] =
(count_class_lookup8[b1] << 8) |
count_class_lookup8[b2];
}
#ifdef __x86_64__
static inline void classify_counts(u64* mem) {
u32 i = MAP_SIZE >> 3;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u16* mem16 = (u16*)mem;
mem16[0] = count_class_lookup16[mem16[0]];
mem16[1] = count_class_lookup16[mem16[1]];
mem16[2] = count_class_lookup16[mem16[2]];
mem16[3] = count_class_lookup16[mem16[3]];
}
mem++;
}
}
#else
static inline void classify_counts(u32* mem) {
u32 i = MAP_SIZE >> 2;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u16* mem16 = (u16*)mem;
mem16[0] = count_class_lookup16[mem16[0]];
mem16[1] = count_class_lookup16[mem16[1]];
}
mem++;
}
}
#endif /* ^__x86_64__ */
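/* Example of the bucketing above: a tuple hit 25 times classifies to
   count_class_lookup8[25] == 32 (the 16..31 bucket), 1 stays 1, and 200
   becomes 128. count_class_lookup16[] just applies the 8-bit table to both
   halves of a u16, letting classify_counts() rewrite the map two bytes per
   table lookup. */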
/* Get rid of shared memory (atexit handler). */
static void remove_shm(void) {
shmctl(shm_id, IPC_RMID, NULL);
}
/* Compact trace bytes into a smaller bitmap. We effectively just drop the
count information here. This is called only sporadically, for some
new paths. */
static void minimize_bits(u8* dst, u8* src) {
u32 i = 0;
while (i < MAP_SIZE) {
if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
i++;
}
}
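/* E.g. with the default 64 kB MAP_SIZE this packs the trace into an 8 kB
   bitmap: a non-zero src[13] sets bit 5 of dst[1] (13 >> 3 == 1, 13 & 7 == 5).
   Hit counts are dropped; only "was this tuple hit at all" survives. */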
/* When we bump into a new path, we call this to see if the path appears
more "favorable" than any of the existing ones. The purpose of the
"favorables" is to have a minimal set of paths that trigger all the bits
seen in the bitmap so far, and focus on fuzzing them at the expense of
the rest.
The first step of the process is to maintain a list of top_rated[] entries
for every byte in the bitmap. We win that slot if there is no previous
contender, or if the contender has a more favorable speed x size factor. */
static void update_bitmap_score(struct queue_entry* q) {
u32 i;
u64 fav_factor = q->exec_us * q->len;
/* For every byte set in trace_bits[], see if there is a previous winner,
and how it compares to us. */
for (i = 0; i < MAP_SIZE; i++)
if (trace_bits[i]) {
if (top_rated[i]) {
/* Faster-executing or smaller test cases are favored. */
if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
/* Looks like we're going to win. Decrease ref count for the
previous winner, discard its trace_bits[] if necessary. */
if (!--top_rated[i]->tc_ref) {
ck_free(top_rated[i]->trace_mini);
top_rated[i]->trace_mini = 0;
}
}
/* Insert ourselves as the new winner. */
top_rated[i] = q;
q->tc_ref++;
if (!q->trace_mini) {
q->trace_mini = ck_alloc(MAP_SIZE >> 3);
minimize_bits(q->trace_mini, trace_bits);
}
score_changed = 1;
}
}
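/* Rough example of the fav_factor comparison: a 120-byte input that executes
   in 400 us scores 120 * 400 = 48,000 and will take over the top_rated[] slot
   for every tuple it hits from a 2,000-byte input running in 300 us (600,000).
   Smaller-and-faster wins even without dominating on both axes. */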
/* The second part of the mechanism discussed above is a routine that
goes over top_rated[] entries, and then sequentially grabs winners for
previously-unseen bytes (temp_v) and marks them as favored, at least
until the next run. The favored entries are given more air time during
all fuzzing steps. */
static void cull_queue(void) {
struct queue_entry* q;
static u8 temp_v[MAP_SIZE >> 3];
u32 i;
if (dumb_mode || !score_changed) return;
score_changed = 0;
memset(temp_v, 255, MAP_SIZE >> 3);
queued_favored = 0;
pending_favored = 0;
q = queue;
while (q) {
q->favored = 0;
q = q->next;
}
/* Let's see if anything in the bitmap isn't captured in temp_v.
If yes, and if it has a top_rated[] contender, let's use it. */
for (i = 0; i < MAP_SIZE; i++)
if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
u32 j = MAP_SIZE >> 3;
/* Remove all bits belonging to the current entry from temp_v. */
while (j--)
if (top_rated[i]->trace_mini[j])
temp_v[j] &= ~top_rated[i]->trace_mini[j];
top_rated[i]->favored = 1;
queued_favored++;
if (!top_rated[i]->was_fuzzed) pending_favored++;
}
q = queue;
while (q) {
mark_as_redundant(q, !q->favored);
q = q->next;
}
}
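/* The loop above is effectively a greedy cover: if entry A holds top_rated[]
   for tuples 1, 2 and 3 and entry B only for tuple 4, A is taken first and its
   bits are cleared from temp_v; B is still favored because tuple 4 remains
   uncovered, while an entry whose tuples are all covered already ends up
   unfavored and marked redundant on disk. */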
/* Configure shared memory and virgin_bits. This is called at startup. */
EXP_ST void setup_shm(void) {
u8* shm_str;
if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);
memset(virgin_tmout, 255, MAP_SIZE);
memset(virgin_crash, 255, MAP_SIZE);
shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
if (shm_id < 0) PFATAL("shmget() failed");
atexit(remove_shm);
shm_str = alloc_printf("%d", shm_id);
/* If somebody is asking us to fuzz instrumented binaries in dumb mode,
we don't want them to detect instrumentation, since we won't be sending
fork server commands. This should be replaced with better auto-detection
later on, perhaps? */
if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1);
ck_free(shm_str);
trace_bits = shmat(shm_id, NULL, 0);
if (trace_bits == (void *)-1) PFATAL("shmat() failed");
}
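/* For context, the counterpart on the target side (injected via afl-as.h or
   the llvm_mode runtime) looks roughly like this - a simplified sketch with a
   made-up function name, not the verbatim shim:
*/
#if 0
extern u8* __afl_area_ptr;               /* where instrumentation writes hits */

static void __afl_map_shm_sketch(void) {

  char* id_str = getenv(SHM_ENV_VAR);    /* set by setup_shm() above          */

  if (!id_str) return;                   /* not running under afl-fuzz        */

  __afl_area_ptr = shmat(atoi(id_str), NULL, 0);

  if (__afl_area_ptr == (void*)-1) _exit(1);

}
#endif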
/* Load postprocessor, if available. */
static void setup_post(void) {
void* dh;
u8* fn = getenv("AFL_POST_LIBRARY");
u32 tlen = 6;
if (!fn) return;
ACTF("Loading postprocessor from '%s'...", fn);
dh = dlopen(fn, RTLD_NOW);
if (!dh) FATAL("%s", dlerror());
post_handler = dlsym(dh, "afl_postprocess");
if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");
/* Do a quick test. It's better to segfault now than later =) */
post_handler("hello", &tlen);
OKF("Postprocessor installed successfully.");
}
/* Read all testcases from the input directory, then queue them for testing.
Called at startup. */
static void read_testcases(void) {
struct dirent **nl;
s32 nl_cnt;
u32 i;
u8* queue_fn;
/* Auto-detect non-in-place resumption attempts. */
queue_fn = alloc_printf("%s/queue", in_dir);
if (!access(queue_fn, F_OK)) in_dir = queue_fn; else ck_free(queue_fn);
ACTF("Scanning '%s'...", in_dir);
/* We use scandir() + alphasort() rather than readdir() because otherwise,
the ordering of test cases would vary somewhat randomly and would be
difficult to control. */
nl_cnt = scandir(in_dir, &nl, NULL, alphasort);
if (nl_cnt < 0) {
if (errno == ENOENT || errno == ENOTDIR)
SAYF("\n" cLRD "[-] " cRST
"The input directory does not seem to be valid - try again. The fuzzer needs\n"
" one or more test case to start with - ideally, a small file under 1 kB\n"
" or so. The cases must be stored as regular files directly in the input\n"
" directory.\n");
PFATAL("Unable to open '%s'", in_dir);
}
if (shuffle_queue && nl_cnt > 1) {
ACTF("Shuffling queue...");
shuffle_ptrs((void**)nl, nl_cnt);
}
for (i = 0; i < nl_cnt; i++) {
struct stat st;
u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);
u8 passed_det = 0;
free(nl[i]); /* not tracked */
if (lstat(fn, &st) || access(fn, R_OK))
PFATAL("Unable to access '%s'", fn);
/* This also takes care of . and .. */
if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) {
ck_free(fn);
ck_free(dfn);
continue;
}
if (st.st_size > MAX_FILE)
FATAL("Test case '%s' is too big (%s, limit is %s)", fn,
DMS(st.st_size), DMS(MAX_FILE));
/* Check for metadata that indicates that deterministic fuzzing
is complete for this entry. We don't want to repeat deterministic
fuzzing when resuming aborted scans, because it would be pointless
and probably very time-consuming. */
if (!access(dfn, F_OK)) passed_det = 1;
ck_free(dfn);
add_to_queue(fn, st.st_size, passed_det);
}
free(nl); /* not tracked */
if (!queued_paths) {
SAYF("\n" cLRD "[-] " cRST
"Looks like there are no valid test cases in the input directory! The fuzzer\n"
" needs one or more test case to start with - ideally, a small file under\n"
" 1 kB or so. The cases must be stored as regular files directly in the\n"
" input directory.\n");
FATAL("No usable test cases in '%s'", in_dir);
}
last_path_time = 0;
queued_at_start = queued_paths;
}
/* Helper function for load_extras. */
static int compare_extras_len(const void* p1, const void* p2) {
struct extra_data *e1 = (struct extra_data*)p1,
*e2 = (struct extra_data*)p2;
return e1->len - e2->len;
}
static int compare_extras_use_d(const void* p1, const void* p2) {
struct extra_data *e1 = (struct extra_data*)p1,
*e2 = (struct extra_data*)p2;
return e2->hit_cnt - e1->hit_cnt;
}
/* Read extras from a file, sort by size. */
static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
u32 dict_level) {
FILE* f;
u8 buf[MAX_LINE];
u8 *lptr;
u32 cur_line = 0;
f = fopen(fname, "r");
if (!f) PFATAL("Unable to open '%s'", fname);
while ((lptr = fgets(buf, MAX_LINE, f))) {
u8 *rptr, *wptr;
u32 klen = 0;
cur_line++;
/* Trim on left and right. */
while (isspace(*lptr)) lptr++;
rptr = lptr + strlen(lptr) - 1;
while (rptr >= lptr && isspace(*rptr)) rptr--;
rptr++;
*rptr = 0;
/* Skip empty lines and comments. */
if (!*lptr || *lptr == '#') continue;
/* All other lines must end with '"', which we can consume. */
rptr--;
if (rptr < lptr || *rptr != '"')
FATAL("Malformed name=\"value\" pair in line %u.", cur_line);
*rptr = 0;
/* Skip alphanumerics and underscores (label). */
while (isalnum(*lptr) || *lptr == '_') lptr++;
/* If @number follows, parse that. */
if (*lptr == '@') {
lptr++;
if (atoi(lptr) > dict_level) continue;
while (isdigit(*lptr)) lptr++;
}
/* Skip whitespace and = signs. */
while (isspace(*lptr) || *lptr == '=') lptr++;
/* Consume opening '"'. */
if (*lptr != '"')
FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);
lptr++;
if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);
/* Okay, let's allocate memory and copy data between "...", handling
\xNN escaping, \\, and \". */
extras = ck_realloc_block(extras, (extras_cnt + 1) *
sizeof(struct extra_data));
wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
while (*lptr) {
char* hexdigits = "0123456789abcdef";
switch (*lptr) {
case 1 ... 31:
case 128 ... 255:
FATAL("Non-printable characters in line %u.", cur_line);
case '\\':
lptr++;
if (*lptr == '\\' || *lptr == '"') {
*(wptr++) = *(lptr++);
klen++;
break;
}
if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
*(wptr++) =
((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
(strchr(hexdigits, tolower(lptr[2])) - hexdigits);
lptr += 3;
klen++;
break;
default:
*(wptr++) = *(lptr++);
klen++;
}
}
extras[extras_cnt].len = klen;
if (extras[extras_cnt].len > MAX_DICT_FILE)
FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
DMS(klen), DMS(MAX_DICT_FILE));
if (*min_len > klen) *min_len = klen;
if (*max_len < klen) *max_len = klen;
extras_cnt++;
}
fclose(f);
}
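/* For reference, lines accepted by the parser above look like this (just a
   sketch of the grammar implemented here):

     # comments and blank lines are skipped
     header_zip="PK\x03\x04"
     deep_keyword@2="used only when -x specifies level 2 or higher"

   i.e. an optional alphanumeric/underscore label, an optional @level, '=',
   and a double-quoted value where \\, \" and \xNN are the only escapes. */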
/* Read extras from the extras directory and sort them by size. */
static void load_extras(u8* dir) {
DIR* d;
struct dirent* de;
u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
u8* x;
/* If the name ends with @, extract level and continue. */
if ((x = strchr(dir, '@'))) {
*x = 0;
dict_level = atoi(x + 1);
}
ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level);
d = opendir(dir);
if (!d) {
if (errno == ENOTDIR) {
load_extras_file(dir, &min_len, &max_len, dict_level);
goto check_and_sort;
}
PFATAL("Unable to open '%s'", dir);
}
if (x) FATAL("Dictionary levels not supported for directories.");
while ((de = readdir(d))) {
struct stat st;
u8* fn = alloc_printf("%s/%s", dir, de->d_name);
s32 fd;
if (lstat(fn, &st) || access(fn, R_OK))
PFATAL("Unable to access '%s'", fn);
/* This also takes care of . and .. */
if (!S_ISREG(st.st_mode) || !st.st_size) {
ck_free(fn);
continue;
}
if (st.st_size > MAX_DICT_FILE)
FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
DMS(st.st_size), DMS(MAX_DICT_FILE));
if (min_len > st.st_size) min_len = st.st_size;
if (max_len < st.st_size) max_len = st.st_size;
extras = ck_realloc_block(extras, (extras_cnt + 1) *
sizeof(struct extra_data));
extras[extras_cnt].data = ck_alloc(st.st_size);
extras[extras_cnt].len = st.st_size;
fd = open(fn, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", fn);
ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
close(fd);
ck_free(fn);
extras_cnt++;
}
closedir(d);
check_and_sort:
if (!extras_cnt) FATAL("No usable files in '%s'", dir);
qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
DMS(min_len), DMS(max_len));
if (max_len > 32)
WARNF("Some tokens are relatively large (%s) - consider trimming.",
DMS(max_len));
if (extras_cnt > MAX_DET_EXTRAS)
WARNF("More than %u tokens - will use them probabilistically.",
MAX_DET_EXTRAS);
}
/* Helper function for maybe_add_auto() */
static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
return 0;
}
/* Maybe add automatic extra. */
static void maybe_add_auto(u8* mem, u32 len) {
u32 i;
/* Allow users to specify that they don't want auto dictionaries. */
if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return;
/* Skip runs of identical bytes. */
for (i = 1; i < len; i++)
if (mem[0] ^ mem[i]) break;
if (i == len) return;
/* Reject builtin interesting values. */
if (len == 2) {
i = sizeof(interesting_16) >> 1;
while (i--)
if (*((u16*)mem) == interesting_16[i] ||
*((u16*)mem) == SWAP16(interesting_16[i])) return;
}
if (len == 4) {
i = sizeof(interesting_32) >> 2;
while (i--)
if (*((u32*)mem) == interesting_32[i] ||
*((u32*)mem) == SWAP32(interesting_32[i])) return;
}
/* Reject anything that matches existing extras. Do a case-insensitive
match. We optimize by exploiting the fact that extras[] are sorted
by size. */
for (i = 0; i < extras_cnt; i++)
if (extras[i].len >= len) break;
for (; i < extras_cnt && extras[i].len == len; i++)
if (!memcmp_nocase(extras[i].data, mem, len)) return;
/* Last but not least, check a_extras[] for matches. There are no
guarantees of a particular sort order. */
auto_changed = 1;
for (i = 0; i < a_extras_cnt; i++) {
if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
a_extras[i].hit_cnt++;
goto sort_a_extras;
}
}
/* At this point, looks like we're dealing with a new entry. So, let's
append it if we have room. Otherwise, let's randomly evict some other
entry from the bottom half of the list. */
if (a_extras_cnt < MAX_AUTO_EXTRAS) {
a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
sizeof(struct extra_data));
a_extras[a_extras_cnt].data = ck_memdup(mem, len);
a_extras[a_extras_cnt].len = len;
a_extras_cnt++;
} else {
i = MAX_AUTO_EXTRAS / 2 +
UR((MAX_AUTO_EXTRAS + 1) / 2);
ck_free(a_extras[i].data);
a_extras[i].data = ck_memdup(mem, len);
a_extras[i].len = len;
a_extras[i].hit_cnt = 0;
}
sort_a_extras:
/* First, sort all auto extras by use count, descending order. */
qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
compare_extras_use_d);
/* Then, sort the top USE_AUTO_EXTRAS entries by size. */
qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
sizeof(struct extra_data), compare_extras_len);
}
/* Save automatically generated extras. */
static void save_auto(void) {
u32 i;
if (!auto_changed) return;
auto_changed = 0;
for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); i++) {
u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
s32 fd;
fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
close(fd);
ck_free(fn);
}
}
/* Load automatically generated extras. */
static void load_auto(void) {
u32 i;
for (i = 0; i < USE_AUTO_EXTRAS; i++) {
u8 tmp[MAX_AUTO_EXTRA + 1];
u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
s32 fd, len;
fd = open(fn, O_RDONLY, 0600);
if (fd < 0) {
if (errno != ENOENT) PFATAL("Unable to open '%s'", fn);
ck_free(fn);
break;
}
/* We read one byte more to cheaply detect tokens that are too
long (and skip them). */
len = read(fd, tmp, MAX_AUTO_EXTRA + 1);
if (len < 0) PFATAL("Unable to read from '%s'", fn);
if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
maybe_add_auto(tmp, len);
close(fd);
ck_free(fn);
}
if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i);
else OKF("No auto-generated dictionary tokens to reuse.");
}
/* Destroy extras. */
static void destroy_extras(void) {
u32 i;
for (i = 0; i < extras_cnt; i++)
ck_free(extras[i].data);
ck_free(extras);
for (i = 0; i < a_extras_cnt; i++)
ck_free(a_extras[i].data);
ck_free(a_extras);
}
/* Spin up fork server (instrumented mode only). The idea is explained here:
http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
In essence, the instrumentation allows us to skip execve(), and just keep
cloning a stopped child. So, we just execute once, and then send commands
through a pipe. The other part of this logic is in afl-as.h. */
EXP_ST void init_forkserver(char** argv) {
static struct itimerval it;
int st_pipe[2], ctl_pipe[2];
int status;
s32 rlen;
ACTF("Spinning up the fork server...");
if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
forksrv_pid = fork();
if (forksrv_pid < 0) PFATAL("fork() failed");
if (!forksrv_pid) {
struct rlimit r;
/* Umpf. On OpenBSD, the default fd limit for root users is set to
soft 128. Let's try to fix that... */
if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
r.rlim_cur = FORKSRV_FD + 2;
setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */
}
if (mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
#ifdef RLIMIT_AS
setrlimit(RLIMIT_AS, &r); /* Ignore errors */
#else
/* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
according to reliable sources, RLIMIT_DATA covers anonymous
maps - so we should be getting good protection against OOM bugs. */
setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
#endif /* ^RLIMIT_AS */
}
/* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
before the dump is complete. */
r.rlim_max = r.rlim_cur = 0;
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
/* Isolate the process and configure standard descriptors. If out_file is
specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
setsid();
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
if (out_file) {
dup2(dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
}
/* Set up control and status pipes, close the unneeded original fds. */
if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
close(ctl_pipe[0]);
close(ctl_pipe[1]);
close(st_pipe[0]);
close(st_pipe[1]);
close(out_dir_fd);
close(dev_null_fd);
close(dev_urandom_fd);
close(fileno(plot_file));
/* This should improve performance a bit, since it stops the linker from
doing extra work post-fork(). */
if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
/* Set sane defaults for ASAN if nothing else specified. */
setenv("ASAN_OPTIONS", "abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
/* MSAN is tricky, because it doesn't support abort_on_error=1 at this
point. So, we do this in a very hacky way. */
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
"abort_on_error=1:"
"allocator_may_return_null=1:"
"msan_track_origins=0", 0);
execv(target_path, argv);
/* Use a distinctive bitmap signature to tell the parent about execv()
falling through. */
*(u32*)trace_bits = EXEC_FAIL_SIG;
exit(0);
}
/* Close the unneeded endpoints. */
close(ctl_pipe[0]);
close(st_pipe[1]);
fsrv_ctl_fd = ctl_pipe[1];
fsrv_st_fd = st_pipe[0];
/* Wait for the fork server to come up, but don't wait too long. */
it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
setitimer(ITIMER_REAL, &it, NULL);
rlen = read(fsrv_st_fd, &status, 4);
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
setitimer(ITIMER_REAL, &it, NULL);
/* If we have a four-byte "hello" message from the server, we're all set.
Otherwise, try to figure out what went wrong. */
if (rlen == 4) {
OKF("All right - fork server is up.");
return;
}
if (child_timed_out)
FATAL("Timeout while initializing fork server (adjusting -t may help)");
if (waitpid(forksrv_pid, &status, 0) <= 0)
PFATAL("waitpid() failed");
if (WIFSIGNALED(status)) {
if (mem_limit && mem_limit < 500 && uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, before receiving any input\n"
" from the fuzzer! Since it seems to be built with ASAN and you have a\n"
" restrictive memory limit configured, this is expected; please read\n"
" %s/notes_for_asan.txt for help.\n", doc_path);
} else if (!mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, before receiving any input\n"
" from the fuzzer! There are several probable explanations:\n\n"
" - The binary is just buggy and explodes entirely on its own. If so, you\n"
" need to fix the underlying problem or find a better replacement.\n\n"
#ifdef __APPLE__
" - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
" break afl-fuzz performance optimizations when running platform-specific\n"
" targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
#endif /* __APPLE__ */
" - Less likely, there is a horrible bug in the fuzzer. If other options\n"
" fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
} else {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, before receiving any input\n"
" from the fuzzer! There are several probable explanations:\n\n"
" - The current memory limit (%s) is too restrictive, causing the\n"
" target to hit an OOM condition in the dynamic linker. Try bumping up\n"
" the limit with the -m setting in the command line. A simple way confirm\n"
" this diagnosis would be:\n\n"
#ifdef RLIMIT_AS
" ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#else
" ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#endif /* ^RLIMIT_AS */
" Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
" estimate the required amount of virtual memory for the binary.\n\n"
" - The binary is just buggy and explodes entirely on its own. If so, you\n"
" need to fix the underlying problem or find a better replacement.\n\n"
#ifdef __APPLE__
" - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
" break afl-fuzz performance optimizations when running platform-specific\n"
" targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
#endif /* __APPLE__ */
" - Less likely, there is a horrible bug in the fuzzer. If other options\n"
" fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
DMS(mem_limit << 20), mem_limit - 1);
}
FATAL("Fork server crashed with signal %d", WTERMSIG(status));
}
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute target application ('%s')", argv[0]);
if (mem_limit && mem_limit < 500 && uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated before we could complete a\n"
" handshake with the injected code. Since it seems to be built with ASAN and\n"
" you have a restrictive memory limit configured, this is expected; please\n"
" read %s/notes_for_asan.txt for help.\n", doc_path);
} else if (!mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated before we could complete a\n"
" handshake with the injected code. Perhaps there is a horrible bug in the\n"
" fuzzer. Poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
} else {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated before we could complete a\n"
" handshake with the injected code. There are %s probable explanations:\n\n"
"%s"
" - The current memory limit (%s) is too restrictive, causing an OOM\n"
" fault in the dynamic linker. This can be fixed with the -m option. A\n"
" simple way to confirm the diagnosis may be:\n\n"
#ifdef RLIMIT_AS
" ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#else
" ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#endif /* ^RLIMIT_AS */
" Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
" estimate the required amount of virtual memory for the binary.\n\n"
" - Less likely, there is a horrible bug in the fuzzer. If other options\n"
" fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
getenv(DEFER_ENV_VAR) ? "three" : "two",
getenv(DEFER_ENV_VAR) ?
" - You are using deferred forkserver, but __AFL_INIT() is never\n"
" reached before the program terminates.\n\n" : "",
DMS(mem_limit << 20), mem_limit - 1);
}
FATAL("Fork server handshake failed");
}
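/* For context, the logic injected into the target (see afl-as.h) is roughly
   the mirror image of the parent-side code above. A simplified sketch with a
   made-up function name - not the real shim, which has more error handling:
*/
#if 0
static void __afl_forkserver_sketch(void) {

  static u8 tmp[4];
  s32 child_pid;

  /* Phone home - the 4-byte "hello" that init_forkserver() waits for. */

  if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;   /* not run by afl-fuzz */

  while (1) {

    int status;

    /* Block until afl-fuzz requests a new run over the control pipe. */

    if (read(FORKSRV_FD, tmp, 4) != 4) _exit(1);

    child_pid = fork();
    if (child_pid < 0) _exit(1);

    /* Child: drop the server fds and fall through into main(). */

    if (!child_pid) {
      close(FORKSRV_FD);
      close(FORKSRV_FD + 1);
      return;
    }

    /* Parent: report the PID, wait for the run, report the exit status. */

    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) _exit(1);
    if (waitpid(child_pid, &status, 0) < 0) _exit(1);
    if (write(FORKSRV_FD + 1, &status, 4) != 4) _exit(1);

  }

}
#endif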
/* Execute target application, monitoring for timeouts. Return status
information. The called program will update trace_bits[]. */
static u8 run_target(char** argv, u32 timeout) {
static struct itimerval it;
static u32 prev_timed_out = 0;
int status = 0;
u32 tb4;
child_timed_out = 0;
/* After this memset, trace_bits[] are effectively volatile, so we
must prevent any earlier operations from venturing into that
territory. */
memset(trace_bits, 0, MAP_SIZE);
MEM_BARRIER();
/* If we're running in "dumb" mode, we can't rely on the fork server
logic compiled into the target program, so we will just keep calling
execve(). There is a bit of code duplication between here and
init_forkserver(), but c'est la vie. */
if (dumb_mode == 1 || no_forkserver) {
child_pid = fork();
if (child_pid < 0) PFATAL("fork() failed");
if (!child_pid) {
struct rlimit r;
if (mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
#ifdef RLIMIT_AS
setrlimit(RLIMIT_AS, &r); /* Ignore errors */
#else
setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
#endif /* ^RLIMIT_AS */
}
r.rlim_max = r.rlim_cur = 0;
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
/* Isolate the process and configure standard descriptors. If out_file is
specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
setsid();
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
if (out_file) {
dup2(dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
}
/* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
close(dev_null_fd);
close(out_dir_fd);
close(dev_urandom_fd);
close(fileno(plot_file));
/* Set sane defaults for ASAN if nothing else specified. */
setenv("ASAN_OPTIONS", "abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
"msan_track_origins=0", 0);
execv(target_path, argv);
/* Use a distinctive bitmap value to tell the parent about execv()
falling through. */
*(u32*)trace_bits = EXEC_FAIL_SIG;
exit(0);
}
} else {
s32 res;
/* In non-dumb mode, we have the fork server up and running, so simply
tell it to have at it, and then read back PID. */
if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
}
/* Configure timeout, as requested by user, then wait for child to terminate. */
it.it_value.tv_sec = (timeout / 1000);
it.it_value.tv_usec = (timeout % 1000) * 1000;
setitimer(ITIMER_REAL, &it, NULL);
/* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
if (dumb_mode == 1 || no_forkserver) {
if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
} else {
s32 res;
if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to communicate with fork server (OOM?)");
}
}
if (!WIFSTOPPED(status)) child_pid = 0;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
setitimer(ITIMER_REAL, &it, NULL);
total_execs++;
/* Any subsequent operations on trace_bits must not be moved by the
compiler below this point. Past this location, trace_bits[] behave
very normally and do not have to be treated as volatile. */
MEM_BARRIER();
tb4 = *(u32*)trace_bits;
#ifdef __x86_64__
classify_counts((u64*)trace_bits);
#else
classify_counts((u32*)trace_bits);
#endif /* ^__x86_64__ */
prev_timed_out = child_timed_out;
/* Report outcome to caller. */
if (WIFSIGNALED(status) && !stop_soon) {
kill_signal = WTERMSIG(status);
if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
return FAULT_CRASH;
}
/* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
must use a special exit code. */
if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
kill_signal = 0;
return FAULT_CRASH;
}
if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
return FAULT_ERROR;
return FAULT_NONE;
}
/* Write modified data to file for testing. If out_file is set, the old file
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
static void write_to_testcase(void* mem, u32 len) {
s32 fd = out_fd;
if (out_file) {
unlink(out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
ck_write(fd, mem, len, out_file);
if (!out_file) {
if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
}
/* The same, but with an adjustable gap. Used for trimming. */
static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
s32 fd = out_fd;
u32 tail_len = len - skip_at - skip_len;
if (out_file) {
unlink(out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
if (skip_at) ck_write(fd, mem, skip_at, out_file);
if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file);
if (!out_file) {
if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
}
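/* E.g. for len = 10, skip_at = 3, skip_len = 2 this writes bytes 0-2 and 5-9
   and truncates to 8 bytes - exactly the file the trimmer needs when probing
   whether the 2-byte chunk at offset 3 can be dropped. */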
static void show_stats(void);
/* Calibrate a new test case. This is done when processing the input directory
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
u32 handicap, u8 from_queue) {
static u8 first_trace[MAP_SIZE];
u8 fault = 0, new_bits = 0, var_detected = 0,
first_run = (q->exec_cksum == 0);
u64 start_us, stop_us;
s32 old_sc = stage_cur, old_sm = stage_max;
u32 use_tmout = exec_tmout;
u8* old_sn = stage_name;
/* Be a bit more generous about timeouts when resuming sessions, or when
trying to calibrate already-added finds. This helps avoid trouble due
to intermittent latency. */
if (!from_queue || resuming_fuzz)
use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
exec_tmout * CAL_TMOUT_PERC / 100);
q->cal_failed++;
stage_name = "calibration";
stage_max = fast_cal ? 3 : CAL_CYCLES;
/* Make sure the forkserver is up before we do anything, and let's not
count its spin-up time toward binary calibration. */
if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
init_forkserver(argv);
if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
start_us = get_cur_time_us();
for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
u32 cksum;
if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
write_to_testcase(use_mem, q->len);
fault = run_target(argv, use_tmout);
/* stop_soon is set by the handler for Ctrl+C. When it's pressed,
we want to bail out quickly. */
if (stop_soon || fault != crash_mode) goto abort_calibration;
if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
}
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
if (q->exec_cksum != cksum) {
u8 hnb = has_new_bits(virgin_bits);
if (hnb > new_bits) new_bits = hnb;
if (q->exec_cksum) {
u32 i;
for (i = 0; i < MAP_SIZE; i++) {
if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
var_bytes[i] = 1;
stage_max = CAL_CYCLES_LONG;
}
}
var_detected = 1;
} else {
q->exec_cksum = cksum;
memcpy(first_trace, trace_bits, MAP_SIZE);
}
}
}
stop_us = get_cur_time_us();
total_cal_us += stop_us - start_us;
total_cal_cycles += stage_max;
/* OK, let's collect some stats about the performance of this test case.
This is used for fuzzing air time calculations in calculate_score(). */
q->exec_us = (stop_us - start_us) / stage_max;
q->bitmap_size = count_bytes(trace_bits);
q->handicap = handicap;
q->cal_failed = 0;
total_bitmap_size += q->bitmap_size;
total_bitmap_entries++;
update_bitmap_score(q);
/* If this case didn't result in new output from the instrumentation, tell
parent. This is a non-critical problem, but something to warn the user
about. */
if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
abort_calibration:
if (new_bits == 2 && !q->has_new_cov) {
q->has_new_cov = 1;
queued_with_cov++;
}
/* Mark variable paths. */
if (var_detected) {
var_byte_count = count_bytes(var_bytes);
if (!q->var_behavior) {
mark_as_variable(q);
queued_variable++;
}
}
stage_name = old_sn;
stage_cur = old_sc;
stage_max = old_sm;
if (!first_run) show_stats();
return fault;
}
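/* Note: the value returned above is one of the FAULT_* codes also produced
by run_target() - FAULT_NONE, FAULT_TMOUT, FAULT_CRASH, FAULT_ERROR - plus
the calibration-specific FAULT_NOINST (no instrumentation output at all) and
FAULT_NOBITS (no new coverage); perform_dry_run() below switches on them. */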
/* Examine map coverage. Called once, for the first test case. */
static void check_map_coverage(void) {
u32 i;
if (count_bytes(trace_bits) < 100) return;
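/* If nothing is ever set in the upper half of the map, the target was most
likely built with an older copy of afl that used a smaller bitmap. */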
for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; i++)
if (trace_bits[i]) return;
WARNF("Recompile binary with newer version of afl to improve coverage!");
}
/* Perform a dry run of all test cases to confirm that the app is working as
expected. This is done only for the initial inputs, and only once. */
static void perform_dry_run(char** argv) {
struct queue_entry* q = queue;
u32 cal_failures = 0;
u8* skip_crashes = getenv("AFL_SKIP_CRASHES");
while (q) {
u8* use_mem;
u8 res;
s32 fd;
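/* Strip the directory part of the path for friendlier messages. */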
u8* fn = strrchr(q->fname, '/') + 1;
ACTF("Attempting dry run with '%s'...", fn);
fd = open(q->fname, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", q->fname);
use_mem = ck_alloc_nozero(q->len);
if (read(fd, use_mem, q->len) != q->len)
FATAL("Short read from '%s'", q->fname);
close(fd);
res = calibrate_case(argv, q, use_mem, 0, 1);
ck_free(use_mem);
if (stop_soon) return;
if (res == crash_mode || res == FAULT_NOBITS)
SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST,
q->len, q->bitmap_size, q->exec_us);
switch (res) {
case FAULT_NONE:
if (q == queue) check_map_coverage();
if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
break;
case FAULT_TMOUT:
if (timeout_given) {
/* The -t nn+ syntax in the command line sets timeout_given to '2' and
instructs afl-fuzz to tolerate but skip queue entries that time
out. */
if (timeout_given > 1) {
WARNF("Test case results in a timeout (skipping)");
q->cal_failed = CAL_CHANCES;
cal_failures++;
break;
}
SAYF("\n" cLRD "[-] " cRST
"The program took more than %u ms to process one of the initial test cases.\n"
" Usually, the right thing to do is to relax the -t option - or to delete it\n"
" altogether and allow the fuzzer to auto-calibrate. That said, if you know\n"
" what you are doing and want to simply skip the unruly test cases, append\n"
" '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout,
exec_tmout);
FATAL("Test case '%s' results in a timeout", fn);
} else {
SAYF("\n" cLRD "[-] " cRST
"The program took more than %u ms to process one of the initial test cases.\n"
" This is bad news; raising the limit with the -t option is possible, but\n"
" will probably make the fuzzing process extremely slow.\n\n"
" If this test case is just a fluke, the other option is to just avoid it\n"
" altogether, and find one that is less of a CPU hog.\n", exec_tmout);
FATAL("Test case '%s' results in a timeout", fn);
}
case FAULT_CRASH:
if (crash_mode) break;
if (skip_crashes) {
WARNF("Test case results in a crash (skipping)");
q->cal_failed = CAL_CHANCES;
cal_failures++;
break;
}
if (mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Oops, the program crashed with one of the test cases provided. There are\n"
" several possible explanations:\n\n"
" - The test case causes known crashes under normal working conditions. If\n"
" so, please remove it. The fuzzer should be seeded with interesting\n"
" inputs - but not ones that cause an outright crash.\n\n"
" - The current memory limit (%s) is too low for this program, causing\n"
" it to die due to OOM when parsing valid files. To fix this, try\n"
" bumping it up with the -m setting in the command line. If in doubt,\n"
" try something along the lines of:\n\n"
#ifdef RLIMIT_AS
" ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
#else
" ( ulimit -Sd $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
#endif /* ^RLIMIT_AS */
" Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
" estimate the required amount of virtual memory for the binary. Also,\n"
" if you are using ASAN, see %s/notes_for_asan.txt.\n\n"
#ifdef __APPLE__
" - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
" break afl-fuzz performance optimizations when running platform-specific\n"
" binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
#endif /* __APPLE__ */
" - Least likely, there is a horrible bug in the fuzzer. If other options\n"
" fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
DMS(mem_limit << 20), mem_limit - 1, doc_path);
} else {
SAYF("\n" cLRD "[-] " cRST
"Oops, the program crashed with one of the test cases provided. There are\n"
" several possible explanations:\n\n"
" - The test case causes known crashes under normal working conditions. If\n"
" so, please remove it. The fuzzer should be seeded with interesting\n"
" inputs - but not ones that cause an outright crash.\n\n"
#ifdef __APPLE__
" - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
" break afl-fuzz performance optimizations when running platform-specific\n"
" binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
#endif /* __APPLE__ */
" - Least likely, there is a horrible bug in the fuzzer. If other options\n"
" fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
}
FATAL("Test case '%s' results in a crash", fn);
case FAULT_ERROR:
FATAL("Unable to execute target application ('%s')", argv[0]);
case FAULT_NOINST:
FATAL("No instrumentation detected");
case FAULT_NOBITS:
useless_at_start++;
if (!in_bitmap && !shuffle_queue)
WARNF("No new instrumentation output, test case may be useless.");
break;
}
if (q->var_behavior) WARNF("Instrumentation output varies across runs.");
q = q->next;
}
if (cal_failures) {
if (cal_failures == queued_paths)
FATAL("All test cases time out%s, giving up!",
skip_crashes ? " or crash" : "");
WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
((double)cal_failures) * 100 / queued_paths,
skip_crashes ? " or crashes" : "");
if (cal_failures * 5 > queued_paths)
WARNF(cLRD "High percentage of rejected test cases, check settings!");
}
OKF("All test cases processed.");
}
/* Helper function: link() if possible, copy otherwise. */
static void link_or_copy(u8* old_path, u8* new_path) {
s32 i = link(old_path, new_path);
s32 sfd, dfd;
u8* tmp;
if (!i) return;
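/* link() can fail e.g. when the input and output directories live on
different filesystems; fall back to a plain copy through a 64 kB buffer. */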
sfd = open(old_path, O_RDONLY);
if (sfd < 0) PFATAL("Unable to open '%s'", old_path);
dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (dfd < 0) PFATAL("Unable to create '%s'", new_path);
tmp = ck_alloc(64 * 1024);
while ((i = read(sfd, tmp, 64 * 1024)) > 0)
ck_write(dfd, tmp, i, new_path);
if (i < 0) PFATAL("read() failed");
ck_free(tmp);
close(sfd);
close(dfd);
}
static void nuke_resume_dir(void);
/* Create hard links for input test cases in the output directory, choosing
good names and pivoting accordingly. */
static void pivot_inputs(void) {
struct queue_entry* q = queue;
u32 id = 0;
ACTF("Creating hard links for all input files...");
while (q) {
u8 *nfn, *rsl = strrchr(q->fname, '/');
u32 orig_id;
if (!rsl) rsl = q->fname; else rsl++;
/* If the original file name conforms to the syntax and the recorded
ID matches the one we'd assign, just use the original file name.
This is valuable for resuming fuzzing runs. */
#ifndef SIMPLE_FILES
# define CASE_PREFIX "id:"
#else
# define CASE_PREFIX "id_"
#endif /* ^!SIMPLE_FILES */
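/* A conforming name starts with the prefix plus a six-digit ID, e.g.
"id:000003,orig:seed" left behind by an earlier run ("id_000003" with
SIMPLE_FILES); only the numeric part is compared here. */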
if (!strncmp(rsl, CASE_PREFIX, 3) &&
sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) {
u8* src_str;