Compare commits

...

38 Commits
Origin ... main

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| emmm | 3ac2f5195d | 132 | 1 month ago |
| emmm | 5097ecdef3 | abab | 1 month ago |
| BagPipeOuO | eb4a4ff095 | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 1 month ago |
| BagPipeOuO | a8bc0b34e0 | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 1 month ago |
| BagPipeOuO | eaf2be4d74 | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 1 month ago |
| BagPipeOuO | 13e33c8bed | Improved the reading report, added the necessary images, and enriched the content | 1 month ago |
| dongloong | 3138560abe | newnew | 1 month ago |
| BagPipeOuO | 3e33c0936a | 1.8 | 1 month ago |
| BagPipeOuO | 6a6c239329 | Annotated afl-as.c | 1 month ago |
| BagPipeOuO | 74366041a3 | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 1 month ago |
| BagPipeOuO | 78d04a2075 | Annotated the makefile | 1 month ago |
| yangguangmamama | a5ec96676c | 123 | 1 month ago |
| dongloong | 2fc8b00adc | new | 1 month ago |
| emmm | fc527fee69 | lalala | 1 month ago |
| emmm | a6704ef8c1 | hahaha | 1 month ago |
| GodKingBS | 7a3b0af177 | 123456 | 2 months ago |
| “18670363079” | 0319921f2d | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 2 months ago |
| BagPipeOuO | 00200f2b09 | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 2 months ago |
| BagPipeOuO | d0d60336ae | Completed the reading report | 2 months ago |
| “18670363079” | 3bf32e4de2 | 1.2 | 2 months ago |
| GodKingBS | 6860809dea | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 2 months ago |
| GodKingBS | 20580d5a1c | alloc | 2 months ago |
| BagPipeOuO | 34e563f83c | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 2 months ago |
| BagPipeOuO | 9ba3397419 | Renamed the report | 2 months ago |
| GodKingBS | 31b9bf56f4 | Merge branch 'main' of https://bdgit.educoder.net/m5cn9itjr/AFL | 2 months ago |
| GodKingBS | bf0e5c1714 | config android | 2 months ago |
| emmm | 00b83c22e4 | maao | 2 months ago |
| GodKingBS | 946e85b964 | hash debug | 2 months ago |
| GodKingBS | df9f16952d | instr | 2 months ago |
| GodKingBS | cb893b3926 | test | 2 months ago |
| GodKingBS | cba725cf09 | types | 2 months ago |
| BagPipeOuO | 8705567f0a | Renamed the report | 4 months ago |
| BagPipeOuO | 94e115ecbd | Modified the report of pre_Reading. | 4 months ago |
| BagPipeOuO | 6db848a67e | Revert to previous state | 4 months ago |
| “18670363079” | 3a57a58e66 | 1 | 4 months ago |
| m5cn9itjr | cea1d228b0 | Delete 'docs/~$24秋软件工程课程报告一:AFL泛读报告.docx' | 4 months ago |
| BagPipeOuO | 8da03c8a59 | qwq | 4 months ago |
| BagPipeOuO | c7ef505a22 | Adjusted the table of contents format to meet the assignment requirements | 4 months ago |

@ -0,0 +1,18 @@
{
"configurations": [
{
"name": "windows-gcc-x86",
"includePath": [
"${workspaceFolder}/**"
],
"compilerPath": "C:/Program Files/MinGW/bin/gcc.exe",
"cStandard": "${default}",
"cppStandard": "${default}",
"intelliSenseMode": "windows-gcc-x86",
"compilerArgs": [
""
]
}
],
"version": 4
}

@ -0,0 +1,140 @@
cc_defaults {
name: "afl-defaults",
cflags: [
"-funroll-loops",
"-Wno-pointer-sign",
"-Wno-pointer-arith",
"-Wno-sign-compare",
"-Wno-unused-parameter",
"-Wno-unused-function",
"-Wno-format",
"-Wno-user-defined-warnings",
"-DUSE_TRACE_PC=1",
"-DBIN_PATH=\"out/host/linux-x86/bin\"",
"-DDOC_PATH=\"out/host/linux-x86/shared/doc/afl\"",
"-D__USE_GNU",
],
}
cc_binary {
name: "afl-fuzz",
static_executable: true,
host_supported: true,
defaults: [
"afl-defaults",
],
srcs: [
"afl-fuzz.c",
],
}
cc_binary {
name: "afl-showmap",
static_executable: true,
host_supported: true,
defaults: [
"afl-defaults",
],
srcs: [
"afl-showmap.c",
],
}
cc_binary {
name: "afl-tmin",
static_executable: true,
host_supported: true,
defaults: [
"afl-defaults",
],
srcs: [
"afl-tmin.c",
],
}
cc_binary {
name: "afl-analyze",
static_executable: true,
host_supported: true,
defaults: [
"afl-defaults",
],
srcs: [
"afl-analyze.c",
],
}
cc_binary {
name: "afl-gotcpu",
static_executable: true,
host_supported: true,
defaults: [
"afl-defaults",
],
srcs: [
"afl-gotcpu.c",
],
}
cc_binary_host {
name: "afl-clang-fast",
static_executable: true,
defaults: [
"afl-defaults",
],
cflags: [
"-D__ANDROID__",
"-DAFL_PATH=\"out/host/linux-x86/lib64\"",
],
srcs: [
"llvm_mode/afl-clang-fast.c",
],
}
cc_binary_host {
name: "afl-clang-fast++",
static_executable: true,
defaults: [
"afl-defaults",
],
cflags: [
"-D__ANDROID__",
"-DAFL_PATH=\"out/host/linux-x86/lib64\"",
],
srcs: [
"llvm_mode/afl-clang-fast.c",
],
}
cc_library_static {
name: "afl-llvm-rt",
compile_multilib: "both",
vendor_available: true,
host_supported: true,
recovery_available: true,
defaults: [
"afl-defaults",
],
srcs: [
"llvm_mode/afl-llvm-rt.o.c",
],
}
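The Android.bp above declares each AFL tool as a Soong module. As a rough sketch of how such modules are typically built, not something this change prescribes, and with the external/AFL location and the lunch target being illustrative assumptions only:

```shell
# Hypothetical workflow: AFL sources placed under external/AFL in an AOSP tree.
$ source build/envsetup.sh            # load the m/lunch helper functions
$ lunch aosp_arm64-eng                # placeholder; any valid lunch target works
$ m afl-fuzz afl-showmap afl-tmin     # build the Soong modules declared above
```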

@ -1,28 +1,28 @@
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Community Guidelines
This project follows [Google's Open Source Community
Guidelines](https://opensource.google.com/conduct/).

@ -1,153 +1,140 @@
#
# american fuzzy lop - makefile
# -----------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2013, 2014, 2015, 2016, 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
PROGNAME = afl
VERSION = $(shell grep '^\#define VERSION ' config.h | cut -d '"' -f2)
PREFIX ?= /usr/local
BIN_PATH = $(PREFIX)/bin
HELPER_PATH = $(PREFIX)/lib/afl
DOC_PATH = $(PREFIX)/share/doc/afl
MISC_PATH = $(PREFIX)/share/afl
# PROGS intentionally omit afl-as, which gets installed elsewhere.
PROGS = afl-gcc afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze
SH_PROGS = afl-plot afl-cmin afl-whatsup
CFLAGS ?= -O3 -funroll-loops
CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
-DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
-DBIN_PATH=\"$(BIN_PATH)\"
ifneq "$(filter Linux GNU%,$(shell uname))" ""
LDFLAGS += -ldl
endif
ifeq "$(findstring clang, $(shell $(CC) --version 2>/dev/null))" ""
TEST_CC = afl-gcc
else
TEST_CC = afl-clang
endif
COMM_HDR = alloc-inl.h config.h debug.h types.h
all: test_x86 $(PROGS) afl-as test_build all_done
ifndef AFL_NO_X86
test_x86:
@echo "[*] Checking for the ability to compile x86 code..."
@echo 'main() { __asm__("xorb %al, %al"); }' | $(CC) -w -x c - -o .test || ( echo; echo "Oops, looks like your compiler can't generate x86 code."; echo; echo "Don't panic! You can use the LLVM or QEMU mode, but see docs/INSTALL first."; echo "(To ignore this error, set AFL_NO_X86=1 and try again.)"; echo; exit 1 )
@rm -f .test
@echo "[+] Everything seems to be working, ready to compile."
else
test_x86:
@echo "[!] Note: skipping x86 compilation checks (AFL_NO_X86 set)."
endif
afl-gcc: afl-gcc.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $$i; done
afl-as: afl-as.c afl-as.h $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
ln -sf afl-as as
afl-fuzz: afl-fuzz.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-showmap: afl-showmap.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-tmin: afl-tmin.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-analyze: afl-analyze.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-gotcpu: afl-gotcpu.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
ifndef AFL_NO_X86
test_build: afl-gcc afl-as afl-showmap
@echo "[*] Testing the CC wrapper and instrumentation output..."
unset AFL_USE_ASAN AFL_USE_MSAN; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
@rm -f test-instr
@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
@echo "[+] All right, the instrumentation seems to be working!"
else
test_build: afl-gcc afl-as afl-showmap
@echo "[!] Note: skipping build tests (you may need to use LLVM or QEMU mode)."
endif
all_done: test_build
@if [ ! "`which clang 2>/dev/null`" = "" ]; then echo "[+] LLVM users: see llvm_mode/README.llvm for a faster alternative to afl-gcc."; fi
@echo "[+] All done! Be sure to review README - it's pretty short and useful."
@if [ "`uname`" = "Darwin" ]; then printf "\nWARNING: Fuzzing on MacOS X is slow because of the unusually high overhead of\nfork() on this OS. Consider using Linux or *BSD. You can also use VirtualBox\n(virtualbox.org) to put AFL inside a Linux or *BSD VM.\n\n"; fi
@! tty <&1 >/dev/null || printf "\033[0;30mNOTE: If you can read this, your terminal probably uses white background.\nThis will make the UI hard to read. See docs/status_screen.txt for advice.\033[0m\n" 2>/dev/null
.NOTPARALLEL: clean
clean:
rm -f $(PROGS) afl-as as afl-g++ afl-clang afl-clang++ *.o *~ a.out core core.[1-9][0-9]* *.stackdump test .test test-instr .test-instr0 .test-instr1 qemu_mode/qemu-2.10.0.tar.bz2 afl-qemu-trace
rm -rf out_dir qemu_mode/qemu-2.10.0
$(MAKE) -C llvm_mode clean
$(MAKE) -C libdislocator clean
$(MAKE) -C libtokencap clean
install: all
mkdir -p -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH)
rm -f $${DESTDIR}$(BIN_PATH)/afl-plot.sh
install -m 755 $(PROGS) $(SH_PROGS) $${DESTDIR}$(BIN_PATH)
rm -f $${DESTDIR}$(BIN_PATH)/afl-as
if [ -f afl-qemu-trace ]; then install -m 755 afl-qemu-trace $${DESTDIR}$(BIN_PATH); fi
ifndef AFL_TRACE_PC
if [ -f afl-clang-fast -a -f afl-llvm-pass.so -a -f afl-llvm-rt.o ]; then set -e; install -m 755 afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 afl-llvm-pass.so afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
else
if [ -f afl-clang-fast -a -f afl-llvm-rt.o ]; then set -e; install -m 755 afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
endif
if [ -f afl-llvm-rt-32.o ]; then set -e; install -m 755 afl-llvm-rt-32.o $${DESTDIR}$(HELPER_PATH); fi
if [ -f afl-llvm-rt-64.o ]; then set -e; install -m 755 afl-llvm-rt-64.o $${DESTDIR}$(HELPER_PATH); fi
set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/$$i; done
install -m 755 afl-as $${DESTDIR}$(HELPER_PATH)
ln -sf afl-as $${DESTDIR}$(HELPER_PATH)/as
install -m 644 README.md docs/ChangeLog docs/*.txt $${DESTDIR}$(DOC_PATH)
cp -r testcases/ $${DESTDIR}$(MISC_PATH)
cp -r dictionaries/ $${DESTDIR}$(MISC_PATH)
publish: clean
test "`basename $$PWD`" = "AFL" || exit 1
test -f ~/www/afl/releases/$(PROGNAME)-$(VERSION).tgz; if [ "$$?" = "0" ]; then echo; echo "Change program version in config.h, mmkay?"; echo; exit 1; fi
cd ..; rm -rf $(PROGNAME)-$(VERSION); cp -pr $(PROGNAME) $(PROGNAME)-$(VERSION); \
tar -cvz -f ~/www/afl/releases/$(PROGNAME)-$(VERSION).tgz $(PROGNAME)-$(VERSION)
chmod 644 ~/www/afl/releases/$(PROGNAME)-$(VERSION).tgz
( cd ~/www/afl/releases/; ln -s -f $(PROGNAME)-$(VERSION).tgz $(PROGNAME)-latest.tgz )
cat docs/README >~/www/afl/README.txt
cat docs/status_screen.txt >~/www/afl/status_screen.txt
cat docs/historical_notes.txt >~/www/afl/historical_notes.txt
cat docs/technical_details.txt >~/www/afl/technical_details.txt
cat docs/ChangeLog >~/www/afl/ChangeLog.txt
cat docs/QuickStartGuide.txt >~/www/afl/QuickStartGuide.txt
echo -n "$(VERSION)" >~/www/afl/version.txt
#
# american fuzzy lop - makefile
# -----------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2013, 2014, 2015, 2016, 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Program name and version
PROGNAME = afl
VERSION = $(shell grep '^\#define VERSION ' config.h | cut -d '"' -f2)
# Installation paths
PREFIX ?= /usr/local
BIN_PATH = $(PREFIX)/bin
HELPER_PATH = $(PREFIX)/lib/afl
DOC_PATH = $(PREFIX)/share/doc/afl
MISC_PATH = $(PREFIX)/share/afl
# Programs and scripts
PROGS = afl-gcc afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze
SH_PROGS = afl-plot afl-cmin afl-whatsup
# Compiler flags
CFLAGS ?= -O3 -funroll-loops
CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
-DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
-DBIN_PATH=\"$(BIN_PATH)\"
# Linker flags for Linux
ifneq "$(filter Linux GNU%,$(shell uname))" ""
LDFLAGS += -ldl
endif
# Check whether the compiler is Clang
ifeq "$(findstring clang, $(shell $(CC) --version 2>/dev/null))" ""
TEST_CC = afl-gcc
else
TEST_CC = afl-clang
endif
# Common header files
COMM_HDR = alloc-inl.h config.h debug.h types.h
# Default target: build and test the programs
all: test_x86 $(PROGS) afl-as test_build all_done
# Check the ability to compile x86 code
ifndef AFL_NO_X86
test_x86:
@echo "[*] Checking for the ability to compile x86 code..."
@echo 'main() { __asm__("xorb %al, %al"); }' | $(CC) -w -x c - -o .test || ( echo; echo "Oops, looks like your compiler can't generate x86 code."; echo; echo "Don't panic! You can use the LLVM or QEMU mode, but see docs/INSTALL first."; echo "(To ignore this error, set AFL_NO_X86=1 and try again.)"; echo; exit 1 )
@rm -f .test
@echo "[+] Everything seems to be working, ready to compile."
else
test_x86:
@echo "[!] Note: skipping x86 compilation checks (AFL_NO_X86 set)."
endif
# Build afl-gcc
afl-gcc: afl-gcc.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $$i; done
# Build afl-as
afl-as: afl-as.c afl-as.h $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
ln -sf afl-as as
# Build afl-fuzz
afl-fuzz: afl-fuzz.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
# Build afl-showmap
afl-showmap: afl-showmap.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
# Build afl-tmin
afl-tmin: afl-tmin.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
# Build afl-analyze
afl-analyze: afl-analyze.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
# Build afl-gotcpu
afl-gotcpu: afl-gotcpu.c $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
# Test build: check that the instrumentation works
ifndef AFL_NO_X86
test_build: afl-gcc afl-as afl-showmap
@echo "[*] Testing the CC wrapper and instrumentation output..."
unset AFL_USE_ASAN AFL_USE_MSAN; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
@rm -f test-instr
@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
@echo "[+] All right, the instrumentation seems to be working!"
else
test_build: afl-gcc afl-as afl-showmap
@echo "[!] Note: skipping build tests (you may need to use LLVM or QEMU mode)."
endif
# Finish the build
all_done: test_build
@if [ ! "`which clang 2>/dev/null`" = "" ]; then echo "[+] LLVM users: see llvm_mode/README.llvm for a faster alternative to afl-gcc."; fi
@echo "[+] All done! Be sure to review README - it's pretty short and useful."
@if [ "`uname`" = "Darwin" ]; then printf "\nWARNING: Fuzzing on MacOS X is slow because of the unusually high overhead of\nfork() on this OS. Consider using Linux or *BSD. You can also use VirtualBox\n(virtualbox.org) to put AFL inside a Linux or *BSD VM.\n\n"; fi
@! tty <&1 >/dev/null || printf "\033[0;30mNOTE: If you can read this, your terminal probably uses white background.\nThis will make the UI hard to read. See docs/status_screen.txt for advice.\033[0m\n" 2>/dev/null
# Clean up build artifacts
.NOTPARALLEL: clean
clean:
rm -f $(PROGS) afl-as as afl-g++ afl-clang afl-clang++ *.o *~ a.out core core.[1-9][0-9]* *.stackdump test .test test-instr .test-instr0 .test-instr1 qemu_mode/qemu-2.10.0.tar.bz2 afl-qemu-trace
rm -rf out_dir qemu_mode/qemu-2.10.0
$(MAKE) -C llvm_mode clean
$(MAKE) -C libdislocator clean
$(MAKE) -C libtokencap clean
# Install the programs
install: all
mkdir -p -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH)
rm -f $${DESTDIR}$(BIN_PATH)/afl-plot.sh
install -m 755 $(PROGS) $(SH_PROGS) $${DESTDIR}$(BIN_PATH)
rm -f $${DESTDIR}$(BIN_PATH)/afl-as
if [ -f afl-qemu-trace ]; then install -m 755 afl-qemu-trace $${DESTDIR}$(BIN_PATH); fi
ifndef AFL_TRACE_PC
if [ -f afl-clang-fast -a -f afl-llvm-pass.so -a -f afl-llvm-rt.o ]; then set -e; install -m 755 afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 afl-llvm-pass.so afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
else
if [ -f afl-clang-fast -a -f afl-llvm-rt.o ]; then set -e; install -m 755 afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
endif
@echo "[+] All done!"

@ -1,56 +1,56 @@
# american fuzzy lop
[![Build Status](https://travis-ci.org/google/AFL.svg?branch=master)](https://travis-ci.org/google/AFL)
Originally developed by Michal Zalewski <lcamtuf@google.com>.
See [QuickStartGuide.txt](docs/QuickStartGuide.txt) if you don't have time to read
this file.
## 1) Challenges of guided fuzzing
Fuzzing is one of the most powerful and proven strategies for identifying
security issues in real-world software; it is responsible for the vast
majority of remote code execution and privilege escalation bugs found to date
in security-critical software.
Unfortunately, fuzzing is also relatively shallow; blind, random mutations
make it very unlikely to reach certain code paths in the tested code, leaving
some vulnerabilities firmly outside the reach of this technique.
There have been numerous attempts to solve this problem. One of the early
approaches - pioneered by Tavis Ormandy - is corpus distillation. The method
relies on coverage signals to select a subset of interesting seeds from a
massive, high-quality corpus of candidate files, and then fuzz them by
traditional means. The approach works exceptionally well, but requires such
a corpus to be readily available. In addition, block coverage measurements
provide only a very simplistic understanding of program state, and are less
useful for guiding the fuzzing effort in the long haul.
Other, more sophisticated research has focused on techniques such as program
flow analysis ("concolic execution"), symbolic execution, or static analysis.
All these methods are extremely promising in experimental settings, but tend
to suffer from reliability and performance problems in practical uses - and
currently do not offer a viable alternative to "dumb" fuzzing techniques.
## 2) The afl-fuzz approach
American Fuzzy Lop is a brute-force fuzzer coupled with an exceedingly simple
but rock-solid instrumentation-guided genetic algorithm. It uses a modified
form of edge coverage to effortlessly pick up subtle, local-scale changes to
program control flow.
Simplifying a bit, the overall algorithm can be summed up as:
1) Load user-supplied initial test cases into the queue,
2) Take next input file from the queue,
3) Attempt to trim the test case to the smallest size that doesn't alter
the measured behavior of the program,
4) Repeatedly mutate the file using a balanced and well-researched variety
of traditional fuzzing strategies,
5) If any of the generated mutations resulted in a new state transition
recorded by the instrumentation, add mutated output as a new entry in the
@ -58,9 +58,9 @@ Simplifying a bit, the overall algorithm can be summed up as:
6) Go to 2.
The discovered test cases are also periodically culled to eliminate ones that
have been obsoleted by newer, higher-coverage finds; and undergo several other
instrumentation-driven effort minimization steps.
As a side result of the fuzzing process, the tool creates a small,
self-contained corpus of interesting test cases. These are extremely useful
@ -69,10 +69,10 @@ for stress-testing browsers, office applications, graphics suites, or
closed-source tools.
The fuzzer is thoroughly tested to deliver out-of-the-box performance far
superior to blind fuzzing or coverage-only tools.
## 3) Instrumenting programs for use with AFL
When source code is available, instrumentation can be injected by a companion
tool that works as a drop-in replacement for gcc or clang in any standard build
process for third-party code.
@ -89,28 +89,28 @@ $ CC=/path/to/afl/afl-gcc ./configure
$ make clean all
```
For C++ programs, you'd also want to set `CXX=/path/to/afl/afl-g++`.
The clang wrappers (afl-clang and afl-clang++) can be used in the same way;
clang users may also opt to leverage a higher-performance instrumentation mode,
as described in llvm_mode/README.llvm.
When testing libraries, you need to find or write a simple program that reads
data from stdin or from a file and passes it to the tested library. In such a
case, it is essential to link this executable against a static version of the
instrumented library, or to make sure that the correct .so file is loaded at
runtime (usually by setting `LD_LIBRARY_PATH`). The simplest option is a static
build, usually possible via:
```shell
$ CC=/path/to/afl/afl-gcc ./configure --disable-shared
```
Setting `AFL_HARDEN=1` when calling 'make' will cause the CC wrapper to
automatically enable code hardening options that make it easier to detect
simple memory bugs. Libdislocator, a helper library included with AFL (see
libdislocator/README.dislocator) can help uncover heap corruption issues, too.
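As a hedged illustration of the two options above (the paths and the target name are placeholders, not something this repository prescribes):

```shell
# Build the target with the hardening options enabled by the CC wrapper.
$ AFL_HARDEN=1 CC=/path/to/afl/afl-gcc ./configure
$ AFL_HARDEN=1 make
# Optionally preload libdislocator to catch heap corruption at run time;
# the library path is an assumption, see libdislocator/README.dislocator.
$ AFL_PRELOAD=/path/to/afl/libdislocator.so ./afl-fuzz -i testcase_dir -o findings_dir ./target @@
```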
PS. ASAN users are advised to review [notes_for_asan.txt](docs/notes_for_asan.txt) file for important
caveats.
@ -118,36 +118,36 @@ caveats.
When source code is *NOT* available, the fuzzer offers experimental support for
fast, on-the-fly instrumentation of black-box binaries. This is accomplished
with a version of QEMU running in the lesser-known "user space emulation" mode.
QEMU is a project separate from AFL, but you can conveniently build the
feature by doing:
```shell
$ cd qemu_mode
$ ./build_qemu_support.sh
```
For additional instructions and caveats, see qemu_mode/README.qemu.
The mode is approximately 2-5x slower than compile-time instrumentation, is
less conducive to parallelization, and may have some other quirks.
## 5) Choosing initial test cases
To operate correctly, the fuzzer requires one or more starting files that
contain a good example of the input data normally expected by the targeted
application. There are two basic rules:
- Keep the files small. Under 1 kB is ideal, although not strictly necessary.
For a discussion of why size matters, see [perf_tips.txt](docs/perf_tips.txt).
- Use multiple test cases only if they are functionally different from
each other. There is no point in using fifty different vacation photos
to fuzz an image library.
You can find many good examples of starting files in the testcases/ subdirectory
that comes with this tool.
PS. If a large corpus of data is available for screening, you may want to use
the afl-cmin utility to identify a subset of functionally distinct files that
@ -155,82 +155,82 @@ exercise different code paths in the target binary.
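afl-cmin follows the same -i/-o directory conventions as afl-fuzz and takes the target after '--', as its usage text (included later in this diff) shows; a minimal sketch, with placeholder directory names, could look like:

```shell
# Distill a large raw corpus down to a functionally distinct subset of seeds.
$ ./afl-cmin -i raw_corpus_dir -o distilled_dir -- /path/to/program @@
```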
## 6) Fuzzing binaries
The fuzzing process itself is carried out by the afl-fuzz utility. This program
requires a read-only directory with initial test cases, a separate place to
store its findings, plus a path to the binary to test.
For target binaries that accept input directly from stdin, the usual syntax is:
```shell
$ ./afl-fuzz -i testcase_dir -o findings_dir /path/to/program [...params...]
```
For programs that take input from a file, use '@@' to mark the location in
the target's command line where the input file name should be placed. The
fuzzer will substitute this for you:
```shell
$ ./afl-fuzz -i testcase_dir -o findings_dir /path/to/program @@
```
You can also use the -f option to have the mutated data written to a specific
file. This is useful if the program expects a particular file extension or so.
Non-instrumented binaries can be fuzzed in the QEMU mode (add -Q in the command
line) or in a traditional, blind-fuzzer mode (specify -n).
You can use -t and -m to override the default timeout and memory limit for the
executed process; rare examples of targets that may need these settings touched
include compilers and video decoders.
Tips for optimizing fuzzing performance are discussed in [perf_tips.txt](docs/perf_tips.txt).
Note that afl-fuzz starts by performing an array of deterministic fuzzing
steps, which can take several days, but tend to produce neat test cases. If you
want quick & dirty results right away - akin to zzuf and other traditional
fuzzers - add the -d option to the command line.
## 7) Interpreting output
See the [status_screen.txt](docs/status_screen.txt) file for information on
how to interpret the displayed stats and monitor the health of the process.
Be sure to consult this file especially if any UI elements are highlighted in
red.
The fuzzing process will continue until you press Ctrl-C. At minimum, you want
to allow the fuzzer to complete one queue cycle, which may take anywhere from a
couple of hours to a week or so.
There are three subdirectories created within the output directory and updated
in real time:
- queue/ - test cases for every distinctive execution path, plus all the
starting files given by the user. This is the synthesized corpus
mentioned in section 2.
Before using this corpus for any other purposes, you can shrink
it to a smaller size using the afl-cmin tool. The tool will find
a smaller subset of files offering equivalent edge coverage.
- crashes/ - unique test cases that cause the tested program to receive a
fatal signal (e.g., SIGSEGV, SIGILL, SIGABRT). The entries are
grouped by the received signal.
- hangs/ - unique test cases that cause the tested program to time out. The
default time limit before something is classified as a hang is
the larger of 1 second and the value of the -t parameter.
The value can be fine-tuned by setting AFL_HANG_TMOUT, but this
is rarely necessary.
Crashes and hangs are considered "unique" if the associated execution paths
involve any state transitions not seen in previously-recorded faults. If a
single bug can be reached in multiple ways, there will be some count inflation
early in the process, but this should quickly taper off.
The file names for crashes and hangs are correlated with parent, non-faulting
queue entries. This should help with debugging.
When you can't reproduce a crash found by afl-fuzz, the most likely cause is
that you are not setting the same memory limit as used by the tool. Try:
```shell
$ LIMIT_MB=50
@ -243,28 +243,28 @@ also change -Sv to -Sd.
Any existing output directory can be also used to resume aborted jobs; try:
```shell
$ ./afl-fuzz -i- -o existing_output_dir [...etc...]
```
If you have gnuplot installed, you can also generate some pretty graphs for any
active fuzzing task using afl-plot. For an example of what this looks like,
see [http://lcamtuf.coredump.cx/afl/plot/](http://lcamtuf.coredump.cx/afl/plot/).
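afl-plot is one of the helper scripts installed alongside afl-fuzz; as far as I recall it simply takes the fuzzer state directory and an output directory for the generated graphs, so treat the exact invocation below as an assumption and check the script's usage message if it differs:

```shell
# Assumed syntax: afl-plot <fuzzer state dir> <output dir for graphs>
$ ./afl-plot findings_dir graph_dir
```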
## 8) Parallelized fuzzing
Every instance of afl-fuzz takes up roughly one core. This means that on
multi-core systems, parallelization is necessary to fully utilize the hardware.
For tips on how to fuzz a common target on multiple cores or multiple networked
machines, please refer to [parallel_fuzzing.txt](docs/parallel_fuzzing.txt).
The parallel fuzzing mode also offers a simple way for interfacing AFL to other
fuzzers, to symbolic or concolic execution engines, and so forth; again, see the
last section of [parallel_fuzzing.txt](docs/parallel_fuzzing.txt) for tips.
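The exact flags are documented in parallel_fuzzing.txt; as a hedged sketch, a typical single-machine setup runs one -M "master" instance plus one or more -S "secondary" instances sharing a sync directory (the instance names below are arbitrary):

```shell
# One deterministic master and one randomized secondary, sharing sync_dir/.
$ ./afl-fuzz -i testcase_dir -o sync_dir -M fuzzer01 /path/to/program @@
$ ./afl-fuzz -i testcase_dir -o sync_dir -S fuzzer02 /path/to/program @@
```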
## 9) Fuzzer dictionaries
By default, the afl-fuzz mutation engine is optimized for compact data formats -
say, images, multimedia, compressed data, regular expression syntax, or shell
scripts. It is somewhat less suited for languages with particularly verbose and
redundant verbiage - notably including HTML, SQL, or JavaScript.
@ -277,48 +277,48 @@ magic headers, or other special tokens associated with the targeted data type
To use this feature, you first need to create a dictionary in one of the two
formats discussed in dictionaries/README.dictionaries; and then point the fuzzer
to it via the -x option in the command line.
(Several common dictionaries are already provided in that subdirectory, too.)
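For example, assuming the target parses SQL and a matching dictionary exists under dictionaries/ (the file name is a placeholder for whichever dictionary fits the target format):

```shell
# Feed a keyword / magic-value dictionary to the fuzzer via -x.
$ ./afl-fuzz -i testcase_dir -o findings_dir -x dictionaries/sql.dict /path/to/program @@
```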
There is no way to provide more structured descriptions of the underlying
syntax, but the fuzzer will likely figure out some of this based on the
instrumentation feedback alone. This actually works in practice, say:
[http://lcamtuf.blogspot.com/2015/04/finding-bugs-in-sqlite-easy-way.html](http://lcamtuf.blogspot.com/2015/04/finding-bugs-in-sqlite-easy-way.html)
PS. Even when no explicit dictionary is given, afl-fuzz will try to extract
existing syntax tokens in the input corpus by watching the instrumentation
very closely during deterministic byte flips. This works for some types of
parsers and grammars, but isn't nearly as good as the -x mode.
If a dictionary is really hard to come by, another option is to let AFL run
for a while, and then use the token capture library that comes as a companion
utility with AFL. For that, see libtokencap/README.tokencap.
## 10) Crash triage
The coverage-based grouping of crashes usually produces a small data set that
can be quickly triaged manually or with a very simple GDB or Valgrind script.
Every crash is also traceable to its parent non-crashing test case in the
queue, making it easier to diagnose faults.
Having said that, it's important to acknowledge that some fuzzing crashes can be
difficult to quickly evaluate for exploitability without a lot of debugging and
code analysis work. To assist with this task, afl-fuzz supports a very unique
"crash exploration" mode enabled with the -C flag.
In this mode, the fuzzer takes one or more crashing test cases as the input,
and uses its feedback-driven fuzzing strategies to very quickly enumerate all
code paths that can be reached in the program while keeping it in the
crashing state.
Mutations that do not result in a crash are rejected; so are any changes that
do not affect the execution path.
The output is a small corpus of files that can be very rapidly examined to see
what degree of control the attacker has over the faulting address, or whether
it is possible to get past an initial out-of-bounds read - and see what lies
beneath.
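A hedged sketch of entering this mode, assuming the crashing inputs come from an earlier run's crashes/ subdirectory (all paths are placeholders):

```shell
# Explore the program states reachable from known crashing inputs.
$ ./afl-fuzz -C -i findings_dir/crashes -o exploration_dir /path/to/program @@
```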
Oh, one more thing: for test case minimization, give afl-tmin a try. The tool
@ -329,19 +329,19 @@ $ ./afl-tmin -i test_case -o minimized_result -- /path/to/program [...]
```
The tool works with crashing and non-crashing test cases alike. In the crash
mode, it will happily accept instrumented and non-instrumented binaries. In the
non-crashing mode, the minimizer relies on standard AFL instrumentation to make
the file simpler without altering the execution path.
The minimizer accepts the -m, -t, -f and @@ syntax in a manner compatible with
afl-fuzz.
Another recent addition to AFL is the afl-analyze tool. It takes an input
file, attempts to sequentially flip bytes, and observes the behavior of the
tested program. It then color-codes the input based on which sections appear to
be critical, and which are not; while not bulletproof, it can often offer quick
insights into complex file formats. More info about its operation can be found
near the end of [technical_details.txt](docs/technical_details.txt).
## 11) Going beyond crashes
@ -352,21 +352,21 @@ found by modifying the target programs to call abort() when, say:
- Two bignum libraries produce different outputs when given the same
fuzzer-generated input,
- An image library produces different outputs when asked to decode the same
input image several times in a row,
- A serialization / deserialization library fails to produce stable outputs
when iteratively serializing and deserializing fuzzer-supplied data,
- A compression library produces an output inconsistent with the input file
when asked to compress and then decompress a particular blob.
Implementing these or similar sanity checks usually takes very little time;
if you are the maintainer of a particular package, you can make this code
conditional with `#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION` (a flag also
shared with libfuzzer) or `#ifdef __AFL_COMPILER` (this one is just for AFL).
## 12) Common-sense risks
Please keep in mind that, similarly to many other computationally-intensive
tasks, fuzzing may put strain on your hardware and on the OS. In particular:
@ -379,19 +379,19 @@ tasks, fuzzing may put strain on your hardware and on the OS. In particular:
- Targeted programs may end up erratically grabbing gigabytes of memory or
filling up disk space with junk files. AFL tries to enforce basic memory
limits, but can't prevent each and every possible mishap. The bottom line
is that you shouldn't be fuzzing on systems where the prospect of data loss
is not an acceptable risk.
- Fuzzing involves billions of reads and writes to the filesystem. On modern
systems, this will be usually heavily cached, resulting in fairly modest
"physical" I/O - but there are many factors that may alter this equation.
It is your responsibility to monitor for potential trouble; with very heavy
I/O, the lifespan of many HDDs and SSDs may be reduced.
A good way to monitor disk I/O on Linux is the 'iostat' command:
```shell
$ iostat -d 3 -x -k [...optional disk ID...]
```
@ -403,34 +403,34 @@ Here are some of the most important caveats for AFL:
a signal (SIGSEGV, SIGABRT, etc). Programs that install custom handlers for
these signals may need to have the relevant code commented out. In the same
vein, faults in child processes spawned by the fuzzed target may evade
detection unless you manually add some code to catch that.
- As with any other brute-force tool, the fuzzer offers limited coverage if
encryption, checksums, cryptographic signatures, or compression are used to
wholly wrap the actual data format to be tested.
To work around this, you can comment out the relevant checks (see
experimental/libpng_no_checksum/ for inspiration); if this is not possible,
you can also write a postprocessor, as explained in
experimental/post_library/.
- There are some unfortunate trade-offs with ASAN and 64-bit binaries. This
isn't due to any specific fault of afl-fuzz; see [notes_for_asan.txt](docs/notes_for_asan.txt)
for tips.
- There is no direct support for fuzzing network services, background
daemons, or interactive apps that require UI interaction to work. You may
need to make simple code changes to make them behave in a more traditional
way. Preeny may offer a relatively simple option, too - see:
https://github.com/zardus/preeny
Some useful tips for modifying network-based services can be also found at:
https://www.fastly.com/blog/how-to-fuzz-server-american-fuzzy-lop
- AFL doesn't output human-readable coverage data. If you want to monitor
coverage, use afl-cov from Michael Rash: https://github.com/mrash/afl-cov
- Occasionally, sentient machines rise against their creators. If this
happens to you, please consult http://lcamtuf.coredump.cx/prep/.
Beyond this, see INSTALL for platform-specific tips.
@ -438,56 +438,56 @@ Beyond this, see INSTALL for platform-specific tips.
## 14) Special thanks
Many of the improvements to afl-fuzz wouldn't be possible without feedback,
bug reports, or patches from:
```
Jann Horn Hanno Boeck
Felix Groebert Jakub Wilk
Richard W. M. Jones Alexander Cherepanov
Tom Ritter Hovik Manucharyan
Sebastian Roschke Eberhard Mattes
Padraig Brady Ben Laurie
@dronesec Luca Barbato
Tobias Ospelt Thomas Jarosch
Martin Carpenter Mudge Zatko
Joe Zbiciak Ryan Govostes
Michael Rash William Robinet
Jonathan Gray Filipe Cabecinhas
Nico Weber Jodie Cunningham
Andrew Griffiths Parker Thompson
Jonathan Neuschäfer Tyler Nighswander
Ben Nagy Samir Aguiar
Aidan Thornton Aleksandar Nikolich
Sam Hakim Laszlo Szekeres
David A. Wheeler Turo Lamminen
Andreas Stieger Richard Godbee
Louis Dassy teor2345
Alex Moneger Dmitry Vyukov
Keegan McAllister Kostya Serebryany
Richo Healey Martijn Bogaard
rc0r Jonathan Foote
Christian Holler Dominique Pelle
Jacek Wielemborek Leo Barnes
Jeremy Barnes Jeff Trull
Guillaume Endignoux ilovezfs
Daniel Godas-Lopez Franjo Ivancic
Austin Seipp Daniel Komaromy
Daniel Binderman Jonathan Metzman
Vegard Nossum Jan Kneschke
Kurt Roeckx Marcel Bohme
Van-Thuan Pham Abhik Roychoudhury
Joshua J. Drake Toby Hutton
Rene Freingruber Sergey Davidoff
Sami Liedes Craig Young
Andrzej Jackowski Daniel Hodson
```
Thank you!
## 15) Contact
Questions? Concerns? Bug reports? Please use GitHub.
There is also a mailing list for the project; to join, send a mail to
<afl-users+subscribe@googlegroups.com>. Or, if you prefer to browse
archives first, try: [https://groups.google.com/group/afl-users](https://groups.google.com/group/afl-users).

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,461 +1,461 @@
#!/usr/bin/env bash
#
# american fuzzy lop - corpus minimization tool
# ---------------------------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2014, 2015 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This tool tries to find the smallest subset of files in the input directory
# that still trigger the full range of instrumentation data points seen in
# the starting corpus. This has two uses:
#
# - Screening large corpora of input files before using them as a seed for
# afl-fuzz. The tool will remove functionally redundant files and likely
# leave you with a much smaller set.
#
# (In this case, you probably also want to consider running afl-tmin on
# the individual files later on to reduce their size.)
#
# - Minimizing the corpus generated organically by afl-fuzz, perhaps when
# planning to feed it to more resource-intensive tools. The tool achieves
# this by removing all entries that used to trigger unique behaviors in the
# past, but have been made obsolete by later finds.
#
# Note that the tool doesn't modify the files themselves. For that, you want
# afl-tmin.
#
# This script must use bash because other shells may have hardcoded limits on
# array sizes.
#
echo "corpus minimization tool for afl-fuzz by <lcamtuf@google.com>"
echo
#########
# SETUP #
#########
# Process command-line options...
MEM_LIMIT=100
TIMEOUT=none
unset IN_DIR OUT_DIR STDIN_FILE EXTRA_PAR MEM_LIMIT_GIVEN \
AFL_CMIN_CRASHES_ONLY AFL_CMIN_ALLOW_ANY QEMU_MODE
while getopts "+i:o:f:m:t:eQC" opt; do
case "$opt" in
"i")
IN_DIR="$OPTARG"
;;
"o")
OUT_DIR="$OPTARG"
;;
"f")
STDIN_FILE="$OPTARG"
;;
"m")
MEM_LIMIT="$OPTARG"
MEM_LIMIT_GIVEN=1
;;
"t")
TIMEOUT="$OPTARG"
;;
"e")
EXTRA_PAR="$EXTRA_PAR -e"
;;
"C")
export AFL_CMIN_CRASHES_ONLY=1
;;
"Q")
EXTRA_PAR="$EXTRA_PAR -Q"
test "$MEM_LIMIT_GIVEN" = "" && MEM_LIMIT=250
QEMU_MODE=1
;;
"?")
exit 1
;;
esac
done
shift $((OPTIND-1))
TARGET_BIN="$1"
if [ "$TARGET_BIN" = "" -o "$IN_DIR" = "" -o "$OUT_DIR" = "" ]; then
cat 1>&2 <<_EOF_
Usage: $0 [ options ] -- /path/to/target_app [ ... ]
Required parameters:
-i dir - input directory with the starting corpus
-o dir - output directory for minimized files
Execution control settings:
-f file - location read by the fuzzed program (stdin)
-m megs - memory limit for child process ($MEM_LIMIT MB)
-t msec - run time limit for child process (none)
-Q - use binary-only instrumentation (QEMU mode)
Minimization settings:
-C - keep crashing inputs, reject everything else
-e - solve for edge coverage only, ignore hit counts
For additional tips, please consult docs/README.
_EOF_
exit 1
fi
# Do a sanity check to discourage the use of /tmp, since we can't really
# handle this safely from a shell script.
if [ "$AFL_ALLOW_TMP" = "" ]; then
echo "$IN_DIR" | grep -qE '^(/var)?/tmp/'
T1="$?"
echo "$TARGET_BIN" | grep -qE '^(/var)?/tmp/'
T2="$?"
echo "$OUT_DIR" | grep -qE '^(/var)?/tmp/'
T3="$?"
echo "$STDIN_FILE" | grep -qE '^(/var)?/tmp/'
T4="$?"
echo "$PWD" | grep -qE '^(/var)?/tmp/'
T5="$?"
if [ "$T1" = "0" -o "$T2" = "0" -o "$T3" = "0" -o "$T4" = "0" -o "$T5" = "0" ]; then
echo "[-] Error: do not use this script in /tmp or /var/tmp." 1>&2
exit 1
fi
fi
# If @@ is specified, but there's no -f, let's come up with a temporary input
# file name.
TRACE_DIR="$OUT_DIR/.traces"
if [ "$STDIN_FILE" = "" ]; then
if echo "$*" | grep -qF '@@'; then
STDIN_FILE="$TRACE_DIR/.cur_input"
fi
fi
# Check for obvious errors.
if [ ! "$MEM_LIMIT" = "none" ]; then
if [ "$MEM_LIMIT" -lt "5" ]; then
echo "[-] Error: dangerously low memory limit." 1>&2
exit 1
fi
fi
if [ ! "$TIMEOUT" = "none" ]; then
if [ "$TIMEOUT" -lt "10" ]; then
echo "[-] Error: dangerously low timeout." 1>&2
exit 1
fi
fi
if [ ! -f "$TARGET_BIN" -o ! -x "$TARGET_BIN" ]; then
TNEW="`which "$TARGET_BIN" 2>/dev/null`"
if [ ! -f "$TNEW" -o ! -x "$TNEW" ]; then
echo "[-] Error: binary '$TARGET_BIN' not found or not executable." 1>&2
exit 1
fi
TARGET_BIN="$TNEW"
fi
if [ "$AFL_SKIP_BIN_CHECK" = "" -a "$QEMU_MODE" = "" ]; then
if ! grep -qF "__AFL_SHM_ID" "$TARGET_BIN"; then
echo "[-] Error: binary '$TARGET_BIN' doesn't appear to be instrumented." 1>&2
exit 1
fi
fi
if [ ! -d "$IN_DIR" ]; then
echo "[-] Error: directory '$IN_DIR' not found." 1>&2
exit 1
fi
test -d "$IN_DIR/queue" && IN_DIR="$IN_DIR/queue"
find "$OUT_DIR" -name 'id[:_]*' -maxdepth 1 -exec rm -- {} \; 2>/dev/null
rm -rf "$TRACE_DIR" 2>/dev/null
rmdir "$OUT_DIR" 2>/dev/null
if [ -d "$OUT_DIR" ]; then
echo "[-] Error: directory '$OUT_DIR' exists and is not empty - delete it first." 1>&2
exit 1
fi
mkdir -m 700 -p "$TRACE_DIR" || exit 1
if [ ! "$STDIN_FILE" = "" ]; then
rm -f "$STDIN_FILE" || exit 1
touch "$STDIN_FILE" || exit 1
fi
if [ "$AFL_PATH" = "" ]; then
SHOWMAP="${0%/afl-cmin}/afl-showmap"
else
SHOWMAP="$AFL_PATH/afl-showmap"
fi
if [ ! -x "$SHOWMAP" ]; then
echo "[-] Error: can't find 'afl-showmap' - please set AFL_PATH." 1>&2
rm -rf "$TRACE_DIR"
exit 1
fi
IN_COUNT=$((`ls -- "$IN_DIR" 2>/dev/null | wc -l`))
if [ "$IN_COUNT" = "0" ]; then
echo "[+] Hmm, no inputs in the target directory. Nothing to be done."
rm -rf "$TRACE_DIR"
exit 1
fi
FIRST_FILE=`ls "$IN_DIR" | head -1`
# Make sure that we're not dealing with a directory.
if [ -d "$IN_DIR/$FIRST_FILE" ]; then
echo "[-] Error: The target directory contains subdirectories - please fix." 1>&2
rm -rf "$TRACE_DIR"
exit 1
fi
# Check for the more efficient way to copy files...
if ln "$IN_DIR/$FIRST_FILE" "$TRACE_DIR/.link_test" 2>/dev/null; then
CP_TOOL=ln
else
CP_TOOL=cp
fi
# Make sure that we can actually get anything out of afl-showmap before we
# waste too much time.
echo "[*] Testing the target binary..."
if [ "$STDIN_FILE" = "" ]; then
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$FIRST_FILE"
else
cp "$IN_DIR/$FIRST_FILE" "$STDIN_FILE"
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
fi
FIRST_COUNT=$((`grep -c . "$TRACE_DIR/.run_test"`))
if [ "$FIRST_COUNT" -gt "0" ]; then
echo "[+] OK, $FIRST_COUNT tuples recorded."
else
echo "[-] Error: no instrumentation output detected (perhaps crash or timeout)." 1>&2
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 1
fi
# Let's roll!
#############################
# STEP 1: COLLECTING TRACES #
#############################
echo "[*] Obtaining traces for input files in '$IN_DIR'..."
(
CUR=0
if [ "$STDIN_FILE" = "" ]; then
while read -r fn; do
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$fn"
done < <(ls "$IN_DIR")
else
while read -r fn; do
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
cp "$IN_DIR/$fn" "$STDIN_FILE"
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
done < <(ls "$IN_DIR")
fi
)
echo
##########################
# STEP 2: SORTING TUPLES #
##########################
# With this out of the way, we sort all tuples by popularity across all
# datasets. The reasoning here is that we won't be able to avoid the files
# that trigger unique tuples anyway, so we will want to start with them and
# see what's left.
echo "[*] Sorting trace sets (this may take a while)..."
ls "$IN_DIR" | sed "s#^#$TRACE_DIR/#" | tr '\n' '\0' | xargs -0 -n 1 cat | \
sort | uniq -c | sort -n >"$TRACE_DIR/.all_uniq"
TUPLE_COUNT=$((`grep -c . "$TRACE_DIR/.all_uniq"`))
echo "[+] Found $TUPLE_COUNT unique tuples across $IN_COUNT files."
#####################################
# STEP 3: SELECTING CANDIDATE FILES #
#####################################
# The next step is to find the best candidate for each tuple. The "best"
# part is understood simply as the smallest input that includes a particular
# tuple in its trace. Empirical evidence suggests that this produces smaller
# datasets than more involved algorithms that could be still pulled off in
# a shell script.
echo "[*] Finding best candidates for each tuple..."
CUR=0
while read -r fn; do
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
sed "s#\$# $fn#" "$TRACE_DIR/$fn" >>"$TRACE_DIR/.candidate_list"
done < <(ls -rS "$IN_DIR")
echo
##############################
# STEP 4: LOADING CANDIDATES #
##############################
# At this point, we have a file of tuple-file pairs, sorted by file size
# in ascending order (as a consequence of ls -rS). By doing sort keyed
# only by tuple (-k 1,1) and configured to output only the first line for
# every key (-s -u), we end up with the smallest file for each tuple.
echo "[*] Sorting candidate list (be patient)..."
sort -k1,1 -s -u "$TRACE_DIR/.candidate_list" | \
sed 's/^/BEST_FILE[/;s/ /]="/;s/$/"/' >"$TRACE_DIR/.candidate_script"
if [ ! -s "$TRACE_DIR/.candidate_script" ]; then
echo "[-] Error: no traces obtained from test cases, check syntax!" 1>&2
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 1
fi
# The sed command converted the sorted list to a shell script that populates
# BEST_FILE[tuple]="fname". Let's load that!
. "$TRACE_DIR/.candidate_script"
##########################
# STEP 5: WRITING OUTPUT #
##########################
# The final trick is to grab the top pick for each tuple, unless said tuple is
# already set due to the inclusion of an earlier candidate; and then put all
# tuples associated with the newly-added file to the "already have" list. The
# loop works from least popular tuples and toward the most common ones.
echo "[*] Processing candidates and writing output files..."
CUR=0
touch "$TRACE_DIR/.already_have"
while read -r cnt tuple; do
CUR=$((CUR+1))
printf "\\r Processing tuple $CUR/$TUPLE_COUNT... "
# If we already have this tuple, skip it.
grep -q "^$tuple\$" "$TRACE_DIR/.already_have" && continue
FN=${BEST_FILE[tuple]}
$CP_TOOL "$IN_DIR/$FN" "$OUT_DIR/$FN"
if [ "$((CUR % 5))" = "0" ]; then
sort -u "$TRACE_DIR/$FN" "$TRACE_DIR/.already_have" >"$TRACE_DIR/.tmp"
mv -f "$TRACE_DIR/.tmp" "$TRACE_DIR/.already_have"
else
cat "$TRACE_DIR/$FN" >>"$TRACE_DIR/.already_have"
fi
done <"$TRACE_DIR/.all_uniq"
echo
OUT_COUNT=`ls -- "$OUT_DIR" | wc -l`
if [ "$OUT_COUNT" = "1" ]; then
echo "[!] WARNING: All test cases had the same traces, check syntax!"
fi
echo "[+] Narrowed down to $OUT_COUNT files, saved in '$OUT_DIR'."
echo
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 0

File diff suppressed because it is too large

@@ -1,346 +1,346 @@
/*
Copyright 2013 Google LLC All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
american fuzzy lop - wrapper for GCC and clang
----------------------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
This program is a drop-in replacement for GCC or clang. The most common way
of using it is to pass the path to afl-gcc or afl-clang via CC when invoking
./configure.
(Of course, use CXX and point it to afl-g++ / afl-clang++ for C++ code.)
The wrapper needs to know the path to afl-as (renamed to 'as'). The default
is /usr/local/lib/afl/. A convenient way to specify alternative directories
would be to set AFL_PATH.
If AFL_HARDEN is set, the wrapper will compile the target app with various
hardening options that may help detect memory management issues more
reliably. You can also specify AFL_USE_ASAN to enable ASAN.
If you want to call a non-default compiler as a next step of the chain,
specify its location via AFL_CC or AFL_CXX.
*/
#define AFL_MAIN
#include "config.h"
#include "types.h"
#include "debug.h"
#include "alloc-inl.h"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
static u8* as_path; /* Path to the AFL 'as' wrapper */
static u8** cc_params; /* Parameters passed to the real CC */
static u32 cc_par_cnt = 1; /* Param count, including argv0 */
static u8 be_quiet, /* Quiet mode */
clang_mode; /* Invoked as afl-clang*? */
/* Try to find our "fake" GNU assembler in AFL_PATH or at the location derived
from argv[0]. If that fails, abort. */
static void find_as(u8* argv0) {
u8 *afl_path = getenv("AFL_PATH");
u8 *slash, *tmp;
if (afl_path) {
tmp = alloc_printf("%s/as", afl_path);
if (!access(tmp, X_OK)) {
as_path = afl_path;
ck_free(tmp);
return;
}
ck_free(tmp);
}
slash = strrchr(argv0, '/');
if (slash) {
u8 *dir;
*slash = 0;
dir = ck_strdup(argv0);
*slash = '/';
tmp = alloc_printf("%s/afl-as", dir);
if (!access(tmp, X_OK)) {
as_path = dir;
ck_free(tmp);
return;
}
ck_free(tmp);
ck_free(dir);
}
if (!access(AFL_PATH "/as", X_OK)) {
as_path = AFL_PATH;
return;
}
FATAL("Unable to find AFL wrapper binary for 'as'. Please set AFL_PATH");
}
/* Copy argv to cc_params, making the necessary edits. */
static void edit_params(u32 argc, char** argv) {
u8 fortify_set = 0, asan_set = 0;
u8 *name;
#if defined(__FreeBSD__) && defined(__x86_64__)
u8 m32_set = 0;
#endif
cc_params = ck_alloc((argc + 128) * sizeof(u8*));
name = strrchr(argv[0], '/');
if (!name) name = argv[0]; else name++;
if (!strncmp(name, "afl-clang", 9)) {
clang_mode = 1;
setenv(CLANG_ENV_VAR, "1", 1);
if (!strcmp(name, "afl-clang++")) {
u8* alt_cxx = getenv("AFL_CXX");
cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
} else {
u8* alt_cc = getenv("AFL_CC");
cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
}
} else {
/* With GCJ and Eclipse installed, you can actually compile Java! The
instrumentation will work (amazingly). Alas, unhandled exceptions do
not call abort(), so afl-fuzz would need to be modified to equate
non-zero exit codes with crash conditions when working with Java
binaries. Meh. */
#ifdef __APPLE__
if (!strcmp(name, "afl-g++")) cc_params[0] = getenv("AFL_CXX");
else if (!strcmp(name, "afl-gcj")) cc_params[0] = getenv("AFL_GCJ");
else cc_params[0] = getenv("AFL_CC");
if (!cc_params[0]) {
SAYF("\n" cLRD "[-] " cRST
"On Apple systems, 'gcc' is usually just a wrapper for clang. Please use the\n"
" 'afl-clang' utility instead of 'afl-gcc'. If you really have GCC installed,\n"
" set AFL_CC or AFL_CXX to specify the correct path to that compiler.\n");
FATAL("AFL_CC or AFL_CXX required on MacOS X");
}
#else
if (!strcmp(name, "afl-g++")) {
u8* alt_cxx = getenv("AFL_CXX");
cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++";
} else if (!strcmp(name, "afl-gcj")) {
u8* alt_cc = getenv("AFL_GCJ");
cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj";
} else {
u8* alt_cc = getenv("AFL_CC");
cc_params[0] = alt_cc ? alt_cc : (u8*)"gcc";
}
#endif /* __APPLE__ */
}
while (--argc) {
u8* cur = *(++argv);
if (!strncmp(cur, "-B", 2)) {
if (!be_quiet) WARNF("-B is already set, overriding");
if (!cur[2] && argc > 1) { argc--; argv++; }
continue;
}
if (!strcmp(cur, "-integrated-as")) continue;
if (!strcmp(cur, "-pipe")) continue;
#if defined(__FreeBSD__) && defined(__x86_64__)
if (!strcmp(cur, "-m32")) m32_set = 1;
#endif
if (!strcmp(cur, "-fsanitize=address") ||
!strcmp(cur, "-fsanitize=memory")) asan_set = 1;
if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
cc_params[cc_par_cnt++] = cur;
}
cc_params[cc_par_cnt++] = "-B";
cc_params[cc_par_cnt++] = as_path;
if (clang_mode)
cc_params[cc_par_cnt++] = "-no-integrated-as";
if (getenv("AFL_HARDEN")) {
cc_params[cc_par_cnt++] = "-fstack-protector-all";
if (!fortify_set)
cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
}
if (asan_set) {
/* Pass this on to afl-as to adjust map density. */
setenv("AFL_USE_ASAN", "1", 1);
} else if (getenv("AFL_USE_ASAN")) {
if (getenv("AFL_USE_MSAN"))
FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_HARDEN"))
FATAL("ASAN and AFL_HARDEN are mutually exclusive");
cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";
cc_params[cc_par_cnt++] = "-fsanitize=address";
} else if (getenv("AFL_USE_MSAN")) {
if (getenv("AFL_USE_ASAN"))
FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_HARDEN"))
FATAL("MSAN and AFL_HARDEN are mutually exclusive");
cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";
cc_params[cc_par_cnt++] = "-fsanitize=memory";
}
if (!getenv("AFL_DONT_OPTIMIZE")) {
#if defined(__FreeBSD__) && defined(__x86_64__)
/* On 64-bit FreeBSD systems, clang -g -m32 is broken, but -m32 itself
works OK. This has nothing to do with us, but let's avoid triggering
that bug. */
if (!clang_mode || !m32_set)
cc_params[cc_par_cnt++] = "-g";
#else
cc_params[cc_par_cnt++] = "-g";
#endif
cc_params[cc_par_cnt++] = "-O3";
cc_params[cc_par_cnt++] = "-funroll-loops";
/* Two indicators that you're building for fuzzing; one of them is
AFL-specific, the other is shared with libfuzzer. */
cc_params[cc_par_cnt++] = "-D__AFL_COMPILER=1";
cc_params[cc_par_cnt++] = "-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION=1";
}
if (getenv("AFL_NO_BUILTIN")) {
cc_params[cc_par_cnt++] = "-fno-builtin-strcmp";
cc_params[cc_par_cnt++] = "-fno-builtin-strncmp";
cc_params[cc_par_cnt++] = "-fno-builtin-strcasecmp";
cc_params[cc_par_cnt++] = "-fno-builtin-strncasecmp";
cc_params[cc_par_cnt++] = "-fno-builtin-memcmp";
cc_params[cc_par_cnt++] = "-fno-builtin-strstr";
cc_params[cc_par_cnt++] = "-fno-builtin-strcasestr";
}
cc_params[cc_par_cnt] = NULL;
}
/* Main entry point */
int main(int argc, char** argv) {
if (isatty(2) && !getenv("AFL_QUIET")) {
SAYF(cCYA "afl-cc " cBRI VERSION cRST " by <lcamtuf@google.com>\n");
} else be_quiet = 1;
if (argc < 2) {
SAYF("\n"
"This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
"for gcc or clang, letting you recompile third-party code with the required\n"
"runtime instrumentation. A common use pattern would be one of the following:\n\n"
" CC=%s/afl-gcc ./configure\n"
" CXX=%s/afl-g++ ./configure\n\n"
"You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and AFL_AS.\n"
"Setting AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
BIN_PATH, BIN_PATH);
exit(1);
}
find_as(argv[0]);
edit_params(argc, argv);
execvp(cc_params[0], (char**)cc_params);
FATAL("Oops, failed to execute '%s' - check your PATH", cc_params[0]);
return 0;
}
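In short, the wrapper above only locates the fake 'as', splices -B <dir> (plus any hardening or sanitizer flags) into the argument list, and then execs the real compiler. A minimal usage sketch, with purely illustrative paths:

# Hypothetical build session (paths are examples only); afl-gcc is a drop-in
# replacement for gcc, so pointing CC/CXX at it is usually all that is needed.
export AFL_PATH=/opt/afl        # directory holding the 'as' wrapper
CC=/opt/afl/afl-gcc CXX=/opt/afl/afl-g++ ./configure --disable-shared
make clean all
# Unless AFL_DONT_OPTIMIZE is set, the real compiler is ultimately invoked
# roughly as:
#   gcc <original args> -B /opt/afl -g -O3 -funroll-loops \
#       -D__AFL_COMPILER=1 -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION=1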

@@ -1,260 +1,260 @@
/*
Copyright 2015 Google LLC All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
american fuzzy lop - free CPU gizmo
-----------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
This tool provides a fairly accurate measurement of CPU preemption rate.
It is meant to complement the quick-and-dirty load average widget shown
in the afl-fuzz UI. See docs/parallel_fuzzing.txt for more info.
For some work loads, the tool may actually suggest running more instances
than you have CPU cores. This can happen if the tested program is spending
a portion of its run time waiting for I/O, rather than being 100%
CPU-bound.
The idea for the getrusage()-based approach comes from Jakub Wilk.
*/
#define AFL_MAIN
#include "android-ashmem.h"
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sched.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include "types.h"
#include "debug.h"
#ifdef __linux__
# define HAVE_AFFINITY 1
#endif /* __linux__ */
/* Get unix time in microseconds. */
static u64 get_cur_time_us(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
}
/* Get CPU usage in microseconds. */
static u64 get_cpu_usage_us(void) {
struct rusage u;
getrusage(RUSAGE_SELF, &u);
return (u.ru_utime.tv_sec * 1000000ULL) + u.ru_utime.tv_usec +
(u.ru_stime.tv_sec * 1000000ULL) + u.ru_stime.tv_usec;
}
/* Measure preemption rate. */
static u32 measure_preemption(u32 target_ms) {
static volatile u32 v1, v2;
u64 st_t, en_t, st_c, en_c, real_delta, slice_delta;
s32 loop_repeats = 0;
st_t = get_cur_time_us();
st_c = get_cpu_usage_us();
repeat_loop:
v1 = CTEST_BUSY_CYCLES;
while (v1--) v2++;
sched_yield();
en_t = get_cur_time_us();
if (en_t - st_t < target_ms * 1000) {
loop_repeats++;
goto repeat_loop;
}
/* Let's see what percentage of this time we actually had a chance to
run, and how much time was spent in the penalty box. */
en_c = get_cpu_usage_us();
real_delta = (en_t - st_t) / 1000;
slice_delta = (en_c - st_c) / 1000;
return real_delta * 100 / slice_delta;
}
/* Do the benchmark thing. */
int main(int argc, char** argv) {
#ifdef HAVE_AFFINITY
u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN),
idle_cpus = 0, maybe_cpus = 0, i;
SAYF(cCYA "afl-gotcpu " cBRI VERSION cRST " by <lcamtuf@google.com>\n");
ACTF("Measuring per-core preemption rate (this will take %0.02f sec)...",
((double)CTEST_CORE_TRG_MS) / 1000);
for (i = 0; i < cpu_cnt; i++) {
s32 fr = fork();
if (fr < 0) PFATAL("fork failed");
if (!fr) {
cpu_set_t c;
u32 util_perc;
CPU_ZERO(&c);
CPU_SET(i, &c);
if (sched_setaffinity(0, sizeof(c), &c))
PFATAL("sched_setaffinity failed for cpu %d", i);
util_perc = measure_preemption(CTEST_CORE_TRG_MS);
if (util_perc < 110) {
SAYF(" Core #%u: " cLGN "AVAILABLE " cRST "(%u%%)\n", i, util_perc);
exit(0);
} else if (util_perc < 250) {
SAYF(" Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc);
exit(1);
}
SAYF(" Core #%u: " cLRD "OVERBOOKED " cRST "(%u%%)\n" cRST, i,
util_perc);
exit(2);
}
}
for (i = 0; i < cpu_cnt; i++) {
int ret;
if (waitpid(-1, &ret, 0) < 0) PFATAL("waitpid failed");
if (WEXITSTATUS(ret) == 0) idle_cpus++;
if (WEXITSTATUS(ret) <= 1) maybe_cpus++;
}
SAYF(cGRA "\n>>> ");
if (idle_cpus) {
if (maybe_cpus == idle_cpus) {
SAYF(cLGN "PASS: " cRST "You can run more processes on %u core%s.",
idle_cpus, idle_cpus > 1 ? "s" : "");
} else {
SAYF(cLGN "PASS: " cRST "You can run more processes on %u to %u core%s.",
idle_cpus, maybe_cpus, maybe_cpus > 1 ? "s" : "");
}
SAYF(cGRA " <<<" cRST "\n\n");
return 0;
}
if (maybe_cpus) {
SAYF(cYEL "CAUTION: " cRST "You may still have %u core%s available.",
maybe_cpus, maybe_cpus > 1 ? "s" : "");
SAYF(cGRA " <<<" cRST "\n\n");
return 1;
}
SAYF(cLRD "FAIL: " cRST "All cores are overbooked.");
SAYF(cGRA " <<<" cRST "\n\n");
return 2;
#else
u32 util_perc;
SAYF(cCYA "afl-gotcpu " cBRI VERSION cRST " by <lcamtuf@google.com>\n");
/* Run a busy loop for CTEST_TARGET_MS. */
ACTF("Measuring gross preemption rate (this will take %0.02f sec)...",
((double)CTEST_TARGET_MS) / 1000);
util_perc = measure_preemption(CTEST_TARGET_MS);
/* Deliver the final verdict. */
SAYF(cGRA "\n>>> ");
if (util_perc < 105) {
SAYF(cLGN "PASS: " cRST "You can probably run additional processes.");
} else if (util_perc < 130) {
SAYF(cYEL "CAUTION: " cRST "Your CPU may be somewhat overbooked (%u%%).",
util_perc);
} else {
SAYF(cLRD "FAIL: " cRST "Your CPU is overbooked (%u%%).", util_perc);
}
SAYF(cGRA " <<<" cRST "\n\n");
return (util_perc > 105) + (util_perc > 130);
#endif /* ^HAVE_AFFINITY */
}
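A quick worked example of the percentage computed by measure_preemption(), using made-up timings:

# Hypothetical timings: the busy loop spanned 1000 ms of wall-clock time but
# was granted only 400 ms of CPU time, i.e. the core was mostly preempted.
# util_perc = real_delta * 100 / slice_delta:
REAL_MS=1000; CPU_MS=400
echo $(( REAL_MS * 100 / CPU_MS ))   # 250 -> reported as OVERBOOKED (>= 250)
# A fully idle core would score about 100 (AVAILABLE, < 110); anything in
# between is flagged as CAUTION.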

@@ -1,170 +1,170 @@
#!/bin/sh
#
# american fuzzy lop - Advanced Persistent Graphing
# -------------------------------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
# Based on a design & prototype by Michael Rash.
#
# Copyright 2014, 2015 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
echo "progress plotting utility for afl-fuzz by <lcamtuf@google.com>"
echo
if [ ! "$#" = "2" ]; then
cat 1>&2 <<_EOF_
This program generates gnuplot images from afl-fuzz output data. Usage:
$0 afl_state_dir graph_output_dir
The afl_state_dir parameter should point to an existing state directory for any
active or stopped instance of afl-fuzz, while graph_output_dir should point to
an empty directory where this tool can write the resulting plots.
The program will put index.html and three PNG images in the output directory;
you should be able to view it with any web browser of your choice.
_EOF_
exit 1
fi
if [ "$AFL_ALLOW_TMP" = "" ]; then
echo "$1" | grep -qE '^(/var)?/tmp/'
T1="$?"
echo "$2" | grep -qE '^(/var)?/tmp/'
T2="$?"
if [ "$T1" = "0" -o "$T2" = "0" ]; then
echo "[-] Error: this script shouldn't be used with shared /tmp directories." 1>&2
exit 1
fi
fi
if [ ! -f "$1/plot_data" ]; then
echo "[-] Error: input directory is not valid (missing 'plot_data')." 1>&2
exit 1
fi
BANNER="`cat "$1/fuzzer_stats" | grep '^afl_banner ' | cut -d: -f2- | cut -b2-`"
test "$BANNER" = "" && BANNER="(none)"
GNUPLOT=`which gnuplot 2>/dev/null`
if [ "$GNUPLOT" = "" ]; then
echo "[-] Error: can't find 'gnuplot' in your \$PATH." 1>&2
exit 1
fi
mkdir "$2" 2>/dev/null
if [ ! -d "$2" ]; then
echo "[-] Error: unable to create the output directory - pick another location." 1>&2
exit 1
fi
rm -f "$2/high_freq.png" "$2/low_freq.png" "$2/exec_speed.png"
mv -f "$2/index.html" "$2/index.html.orig" 2>/dev/null
echo "[*] Generating plots..."
(
cat <<_EOF_
set terminal png truecolor enhanced size 1000,300 butt
set output '$2/high_freq.png'
set xdata time
set timefmt '%s'
set format x "%b %d\n%H:%M"
set tics font 'small'
unset mxtics
unset mytics
set grid xtics linetype 0 linecolor rgb '#e0e0e0'
set grid ytics linetype 0 linecolor rgb '#e0e0e0'
set border linecolor rgb '#50c0f0'
set tics textcolor rgb '#000000'
set key outside
set autoscale xfixmin
set autoscale xfixmax
plot '$1/plot_data' using 1:4 with filledcurve x1 title 'total paths' linecolor rgb '#000000' fillstyle transparent solid 0.2 noborder, \\
'' using 1:3 with filledcurve x1 title 'current path' linecolor rgb '#f0f0f0' fillstyle transparent solid 0.5 noborder, \\
'' using 1:5 with lines title 'pending paths' linecolor rgb '#0090ff' linewidth 3, \\
'' using 1:6 with lines title 'pending favs' linecolor rgb '#c00080' linewidth 3, \\
'' using 1:2 with lines title 'cycles done' linecolor rgb '#c000f0' linewidth 3
set terminal png truecolor enhanced size 1000,200 butt
set output '$2/low_freq.png'
plot '$1/plot_data' using 1:8 with filledcurve x1 title '' linecolor rgb '#c00080' fillstyle transparent solid 0.2 noborder, \\
'' using 1:8 with lines title ' uniq crashes' linecolor rgb '#c00080' linewidth 3, \\
'' using 1:9 with lines title 'uniq hangs' linecolor rgb '#c000f0' linewidth 3, \\
'' using 1:10 with lines title 'levels' linecolor rgb '#0090ff' linewidth 3
set terminal png truecolor enhanced size 1000,200 butt
set output '$2/exec_speed.png'
plot '$1/plot_data' using 1:11 with filledcurve x1 title '' linecolor rgb '#0090ff' fillstyle transparent solid 0.2 noborder, \\
'$1/plot_data' using 1:11 with lines title ' execs/sec' linecolor rgb '#0090ff' linewidth 3 smooth bezier;
_EOF_
) | gnuplot
if [ ! -s "$2/exec_speed.png" ]; then
echo "[-] Error: something went wrong! Perhaps you have an ancient version of gnuplot?" 1>&2
exit 1
fi
echo "[*] Generating index.html..."
cat >"$2/index.html" <<_EOF_
<table style="font-family: 'Trebuchet MS', 'Tahoma', 'Arial', 'Helvetica'">
<tr><td style="width: 18ex"><b>Banner:</b></td><td>$BANNER</td></tr>
<tr><td><b>Directory:</b></td><td>$1</td></tr>
<tr><td><b>Generated on:</b></td><td>`date`</td></tr>
</table>
<p>
<img src="high_freq.png" width=1000 height=300><p>
<img src="low_freq.png" width=1000 height=200><p>
<img src="exec_speed.png" width=1000 height=200>
_EOF_
# Make it easy to remotely view results when outputting directly to a directory
# served by Apache or other HTTP daemon. Since the plots aren't horribly
# sensitive, this seems like a reasonable trade-off.
chmod 755 "$2"
chmod 644 "$2/high_freq.png" "$2/low_freq.png" "$2/exec_speed.png" "$2/index.html"
echo "[+] All done - enjoy your charts!"
exit 0
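A minimal, hypothetical invocation of the plotting script above (the directory names are examples only):

# Point the script at one fuzzer's state directory and at an output directory;
# it refuses shared /tmp locations and needs gnuplot somewhere in $PATH.
./afl-plot ./findings/fuzzer01 ./plots
# Then open ./plots/index.html to view high_freq.png, low_freq.png and
# exec_speed.png.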

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,163 +1,163 @@
#!/bin/sh
#
# american fuzzy lop - status check tool
# --------------------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2015 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This tool summarizes the status of any locally-running synchronized
# instances of afl-fuzz.
#
echo "status check tool for afl-fuzz by <lcamtuf@google.com>"
echo
if [ "$1" = "-s" ]; then
SUMMARY_ONLY=1
DIR="$2"
else
unset SUMMARY_ONLY
DIR="$1"
fi
if [ "$DIR" = "" ]; then
echo "Usage: $0 [ -s ] afl_sync_dir" 1>&2
echo 1>&2
echo "The -s option causes the tool to skip all the per-fuzzer trivia and show" 1>&2
echo "just the summary results. See docs/parallel_fuzzing.txt for additional tips." 1>&2
echo 1>&2
exit 1
fi
cd "$DIR" || exit 1
if [ -d queue ]; then
echo "[-] Error: parameter is an individual output directory, not a sync dir." 1>&2
exit 1
fi
CUR_TIME=`date +%s`
TMP=`mktemp -t .afl-whatsup-XXXXXXXX` || TMP=`mktemp -p /data/local/tmp .afl-whatsup-XXXXXXXX` || exit 1
ALIVE_CNT=0
DEAD_CNT=0
TOTAL_TIME=0
TOTAL_EXECS=0
TOTAL_EPS=0
TOTAL_CRASHES=0
TOTAL_PFAV=0
TOTAL_PENDING=0
if [ "$SUMMARY_ONLY" = "" ]; then
echo "Individual fuzzers"
echo "=================="
echo
fi
for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do
sed 's/^command_line.*$/_skip:1/;s/[ ]*:[ ]*/="/;s/$/"/' "$i" >"$TMP"
. "$TMP"
RUN_UNIX=$((CUR_TIME - start_time))
RUN_DAYS=$((RUN_UNIX / 60 / 60 / 24))
RUN_HRS=$(((RUN_UNIX / 60 / 60) % 24))
if [ "$SUMMARY_ONLY" = "" ]; then
echo ">>> $afl_banner ($RUN_DAYS days, $RUN_HRS hrs) <<<"
echo
fi
if ! kill -0 "$fuzzer_pid" 2>/dev/null; then
if [ "$SUMMARY_ONLY" = "" ]; then
echo " Instance is dead or running remotely, skipping."
echo
fi
DEAD_CNT=$((DEAD_CNT + 1))
continue
fi
ALIVE_CNT=$((ALIVE_CNT + 1))
EXEC_SEC=$((execs_done / RUN_UNIX))
PATH_PERC=$((cur_path * 100 / paths_total))
TOTAL_TIME=$((TOTAL_TIME + RUN_UNIX))
TOTAL_EPS=$((TOTAL_EPS + EXEC_SEC))
TOTAL_EXECS=$((TOTAL_EXECS + execs_done))
TOTAL_CRASHES=$((TOTAL_CRASHES + unique_crashes))
TOTAL_PENDING=$((TOTAL_PENDING + pending_total))
TOTAL_PFAV=$((TOTAL_PFAV + pending_favs))
if [ "$SUMMARY_ONLY" = "" ]; then
echo " cycle $((cycles_done + 1)), lifetime speed $EXEC_SEC execs/sec, path $cur_path/$paths_total (${PATH_PERC}%)"
if [ "$unique_crashes" = "0" ]; then
echo " pending $pending_favs/$pending_total, coverage $bitmap_cvg, no crashes yet"
else
echo " pending $pending_favs/$pending_total, coverage $bitmap_cvg, crash count $unique_crashes (!)"
fi
echo
fi
done
rm -f "$TMP"
TOTAL_DAYS=$((TOTAL_TIME / 60 / 60 / 24))
TOTAL_HRS=$(((TOTAL_TIME / 60 / 60) % 24))
test "$TOTAL_TIME" = "0" && TOTAL_TIME=1
echo "Summary stats"
echo "============="
echo
echo " Fuzzers alive : $ALIVE_CNT"
if [ ! "$DEAD_CNT" = "0" ]; then
echo " Dead or remote : $DEAD_CNT (excluded from stats)"
fi
echo " Total run time : $TOTAL_DAYS days, $TOTAL_HRS hours"
echo " Total execs : $((TOTAL_EXECS / 1000 / 1000)) million"
echo " Cumulative speed : $TOTAL_EPS execs/sec"
echo " Pending paths : $TOTAL_PFAV faves, $TOTAL_PENDING total"
if [ "$ALIVE_CNT" -gt "1" ]; then
echo " Pending per fuzzer : $((TOTAL_PFAV/ALIVE_CNT)) faves, $((TOTAL_PENDING/ALIVE_CNT)) total (on average)"
fi
echo " Crashes found : $TOTAL_CRASHES locally unique"
echo
exit 0
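To illustrate the fuzzer_stats parsing trick above: the sed expression rewrites each "key : value" line into a quoted shell assignment (and neutralizes command_line, whose value could otherwise break sourcing). A made-up excerpt:

# Hypothetical fuzzer_stats lines:
#   start_time        : 1700000000
#   execs_done        : 2500000
#   command_line      : ./afl-fuzz -i in -o out -- ./target @@
# After sed 's/^command_line.*$/_skip:1/;s/[ ]*:[ ]*/="/;s/$/"/' they become:
#   start_time="1700000000"
#   execs_done="2500000"
#   _skip="1"
# Sourcing the result gives the loop plain shell variables to do math on.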

File diff suppressed because it is too large

@@ -1,82 +1,82 @@
#ifdef __ANDROID__
#ifndef _ANDROID_ASHMEM_H
#define _ANDROID_ASHMEM_H
#include <fcntl.h>
#include <linux/ashmem.h>
#include <linux/shm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#if __ANDROID_API__ >= 26
#define shmat bionic_shmat
#define shmctl bionic_shmctl
#define shmdt bionic_shmdt
#define shmget bionic_shmget
#endif
#include <sys/shm.h>
#undef shmat
#undef shmctl
#undef shmdt
#undef shmget
#include <stdio.h>
#define ASHMEM_DEVICE "/dev/ashmem"
static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
int ret = 0;
if (__cmd == IPC_RMID) {
int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
struct ashmem_pin pin = {0, length};
ret = ioctl(__shmid, ASHMEM_UNPIN, &pin);
close(__shmid);
}
return ret;
}
static inline int shmget(key_t __key, size_t __size, int __shmflg) {
(void) __shmflg;
int fd, ret;
char ourkey[11];
fd = open(ASHMEM_DEVICE, O_RDWR);
if (fd < 0)
return fd;
sprintf(ourkey, "%d", __key);
ret = ioctl(fd, ASHMEM_SET_NAME, ourkey);
if (ret < 0)
goto error;
ret = ioctl(fd, ASHMEM_SET_SIZE, __size);
if (ret < 0)
goto error;
return fd;
error:
close(fd);
return ret;
}
static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
(void) __shmflg;
int size;
void *ptr;
size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
if (size < 0) {
return NULL;
}
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0);
if (ptr == MAP_FAILED) {
return NULL;
}
return ptr;
}
#endif /* !_ANDROID_ASHMEM_H */
#endif /* !__ANDROID__ */

@@ -1,362 +1,362 @@
/*
Copyright 2013 Google LLC All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
american fuzzy lop - vaguely configurable bits
----------------------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
*/
#ifndef _HAVE_CONFIG_H
#define _HAVE_CONFIG_H
#include "types.h"
/* Version string: */
#define VERSION "2.57b"
/******************************************************
* *
* Settings that may be of interest to power users: *
* *
******************************************************/
/* Comment out to disable terminal colors (note that this makes afl-analyze
a lot less nice): */
#define USE_COLOR
/* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */
#define FANCY_BOXES
/* Default timeout for fuzzed code (milliseconds). This is the upper bound,
also used for detecting hangs; the actual value is auto-scaled: */
#define EXEC_TIMEOUT 1000
/* Timeout rounding factor when auto-scaling (milliseconds): */
#define EXEC_TM_ROUND 20
/* 64bit arch MACRO */
#if (defined (__x86_64__) || defined (__arm64__) || defined (__aarch64__))
#define WORD_SIZE_64 1
#endif
/* Default memory limit for child process (MB): */
#ifndef WORD_SIZE_64
# define MEM_LIMIT 25
#else
# define MEM_LIMIT 50
#endif /* ^!WORD_SIZE_64 */
/* Default memory limit when running in QEMU mode (MB): */
#define MEM_LIMIT_QEMU 200
/* Number of calibration cycles per every new test case (and for test
cases that show variable behavior): */
#define CAL_CYCLES 8
#define CAL_CYCLES_LONG 40
/* Number of subsequent timeouts before abandoning an input file: */
#define TMOUT_LIMIT 250
/* Maximum number of unique hangs or crashes to record: */
#define KEEP_UNIQUE_HANG 500
#define KEEP_UNIQUE_CRASH 5000
/* Baseline number of random tweaks during a single 'havoc' stage: */
#define HAVOC_CYCLES 256
#define HAVOC_CYCLES_INIT 1024
/* Maximum multiplier for the above (should be a power of two, beware
of 32-bit int overflows): */
#define HAVOC_MAX_MULT 16
/* Absolute minimum number of havoc cycles (after all adjustments): */
#define HAVOC_MIN 16
/* Maximum stacking for havoc-stage tweaks. The actual value is calculated
like this:
n = random between 1 and HAVOC_STACK_POW2
stacking = 2^n
In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or
128 stacked tweaks: */
#define HAVOC_STACK_POW2 7
/* Caps on block sizes for cloning and deletion operations. Each of these
ranges has a 33% probability of getting picked, except for the first
two cycles where smaller blocks are favored: */
#define HAVOC_BLK_SMALL 32
#define HAVOC_BLK_MEDIUM 128
#define HAVOC_BLK_LARGE 1500
/* Extra-large blocks, selected very rarely (<5% of the time): */
#define HAVOC_BLK_XL 32768
/* Probabilities of skipping non-favored entries in the queue, expressed as
percentages: */
#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */
#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */
#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */
/* Splicing cycle count: */
#define SPLICE_CYCLES 15
/* Nominal per-splice havoc cycle length: */
#define SPLICE_HAVOC 32
/* Maximum offset for integer addition / subtraction stages: */
#define ARITH_MAX 35
/* Limits for the test case trimmer. The absolute minimum chunk size; and
the starting and ending divisors for chopping up the input file: */
#define TRIM_MIN_BYTES 4
#define TRIM_START_STEPS 16
#define TRIM_END_STEPS 1024
/* Maximum size of input file, in bytes (keep under 100MB): */
#define MAX_FILE (1 * 1024 * 1024)
/* The same, for the test case minimizer: */
#define TMIN_MAX_FILE (10 * 1024 * 1024)
/* Block normalization steps for afl-tmin: */
#define TMIN_SET_MIN_SIZE 4
#define TMIN_SET_STEPS 128
/* Maximum dictionary token size (-x), in bytes: */
#define MAX_DICT_FILE 128
/* Length limits for auto-detected dictionary tokens: */
#define MIN_AUTO_EXTRA 3
#define MAX_AUTO_EXTRA 32
/* Maximum number of user-specified dictionary tokens to use in deterministic
steps; past this point, the "extras/user" step will be still carried out,
but with proportionally lower odds: */
#define MAX_DET_EXTRAS 200
/* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing
(first value), and to keep in memory as candidates. The latter should be much
higher than the former. */
#define USE_AUTO_EXTRAS 50
#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10)
/* Scaling factor for the effector map used to skip some of the more
expensive deterministic steps. The actual divisor is set to
2^EFF_MAP_SCALE2 bytes: */
#define EFF_MAP_SCALE2 3
/* Minimum input file length at which the effector logic kicks in: */
#define EFF_MIN_LEN 128
/* Maximum effector density past which everything is just fuzzed
unconditionally (%): */
#define EFF_MAX_PERC 90
/* UI refresh frequency (Hz): */
#define UI_TARGET_HZ 5
/* Fuzzer stats file and plot update intervals (sec): */
#define STATS_UPDATE_SEC 60
#define PLOT_UPDATE_SEC 5
/* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */
#define AVG_SMOOTHING 16
/* Sync interval (every n havoc cycles): */
#define SYNC_INTERVAL 5
/* Output directory reuse grace period (minutes): */
#define OUTPUT_GRACE 25
/* Uncomment to use simple file names (id_NNNNNN): */
// #define SIMPLE_FILES
/* List of interesting values to use in fuzzing. */
#define INTERESTING_8 \
-128, /* Overflow signed 8-bit when decremented */ \
-1, /* */ \
0, /* */ \
1, /* */ \
16, /* One-off with common buffer size */ \
32, /* One-off with common buffer size */ \
64, /* One-off with common buffer size */ \
100, /* One-off with common buffer size */ \
127 /* Overflow signed 8-bit when incremented */
#define INTERESTING_16 \
-32768, /* Overflow signed 16-bit when decremented */ \
-129, /* Overflow signed 8-bit */ \
128, /* Overflow signed 8-bit */ \
255, /* Overflow unsig 8-bit when incremented */ \
256, /* Overflow unsig 8-bit */ \
512, /* One-off with common buffer size */ \
1000, /* One-off with common buffer size */ \
1024, /* One-off with common buffer size */ \
4096, /* One-off with common buffer size */ \
32767 /* Overflow signed 16-bit when incremented */
#define INTERESTING_32 \
-2147483648LL, /* Overflow signed 32-bit when decremented */ \
-100663046, /* Large negative number (endian-agnostic) */ \
-32769, /* Overflow signed 16-bit */ \
32768, /* Overflow signed 16-bit */ \
65535, /* Overflow unsig 16-bit when incremented */ \
65536, /* Overflow unsig 16 bit */ \
100663045, /* Large positive number (endian-agnostic) */ \
2147483647 /* Overflow signed 32-bit when incremented */
/***********************************************************
* *
* Really exotic stuff you probably don't want to touch: *
* *
***********************************************************/
/* Call count interval between reseeding the libc PRNG from /dev/urandom: */
#define RESEED_RNG 10000
/* Maximum line length passed from GCC to 'as' and used for parsing
configuration files: */
#define MAX_LINE 8192
/* Environment variable used to pass SHM ID to the called program. */
#define SHM_ENV_VAR "__AFL_SHM_ID"
/* Other less interesting, internal-only variables. */
#define CLANG_ENV_VAR "__AFL_CLANG_MODE"
#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK"
#define PERSIST_ENV_VAR "__AFL_PERSISTENT"
#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV"
/* In-code signatures for deferred and persistent mode. */
#define PERSIST_SIG "##SIG_AFL_PERSISTENT##"
#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##"
/* Distinctive bitmap signature used to indicate failed execution: */
#define EXEC_FAIL_SIG 0xfee1dead
/* Distinctive exit code used to indicate MSAN trip condition: */
#define MSAN_ERROR 86
/* Designated file descriptors for forkserver commands (the application will
use FORKSRV_FD and FORKSRV_FD + 1): */
#define FORKSRV_FD 198
/* Fork server init timeout multiplier: we'll wait the user-selected
timeout plus this much for the fork server to spin up. */
#define FORK_WAIT_MULT 10
/* Calibration timeout adjustments, to be a bit more generous when resuming
fuzzing sessions or trying to calibrate already-added internal finds.
The first value is a percentage, the other is in milliseconds: */
#define CAL_TMOUT_PERC 125
#define CAL_TMOUT_ADD 50
/* Number of chances to calibrate a case before giving up: */
#define CAL_CHANCES 3
/* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than
2; you probably want to keep it under 18 or so for performance reasons
(adjusting AFL_INST_RATIO when compiling is probably a better way to solve
problems with complex programs). You need to recompile the target binary
after changing this - otherwise, SEGVs may ensue. */
#define MAP_SIZE_POW2 16
#define MAP_SIZE (1 << MAP_SIZE_POW2)
/* Maximum allocator request size (keep well under INT_MAX): */
#define MAX_ALLOC 0x40000000
/* A made-up hashing seed: */
#define HASH_CONST 0xa5b35705
/* Constants for afl-gotcpu to control busy loop timing: */
#define CTEST_TARGET_MS 5000
#define CTEST_CORE_TRG_MS 1000
#define CTEST_BUSY_CYCLES (10 * 1000 * 1000)
/* Uncomment this to use inferior block-coverage-based instrumentation. Note
that you need to recompile the target binary for this to have any effect: */
// #define COVERAGE_ONLY
/* Uncomment this to ignore hit counts and output just one bit per tuple.
As with the previous setting, you will need to recompile the target
binary: */
// #define SKIP_COUNTS
/* Uncomment this to use instrumentation data to record newly discovered paths,
but do not use them as seeds for fuzzing. This is useful for conveniently
measuring coverage that could be attained by a "dumb" fuzzing algorithm: */
// #define IGNORE_FINDS
#endif /* ! _HAVE_CONFIG_H */
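
Several of these knobs are contracts between the fuzzer and the instrumented target rather than pure tunables; SHM_ENV_VAR and MAP_SIZE in particular define how the target locates the coverage bitmap. The fragment below is a minimal sketch of the consumer side, assuming the usual AFL handshake; the names map_shm_example, dummy_map and coverage_map are illustrative, not taken from the repo.
#include <stdlib.h>
#include <sys/shm.h>
#include "config.h"                     /* SHM_ENV_VAR, MAP_SIZE */
static unsigned char dummy_map[MAP_SIZE];      /* fallback when not run under afl-fuzz */
static unsigned char *coverage_map = dummy_map;
static void map_shm_example(void) {
  char *id_str = getenv(SHM_ENV_VAR);   /* "__AFL_SHM_ID", exported by the fuzzer */
  if (!id_str) return;                  /* standalone run: keep writing to the dummy map */
  void *map = shmat(atoi(id_str), NULL, 0);
  if (map != (void *) -1) coverage_map = map;  /* coverage updates now land in shared memory */
}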

@ -1,258 +1,258 @@
/*
Copyright 2013 Google LLC All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
american fuzzy lop - debug / error handling macros
--------------------------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
*/
#ifndef _HAVE_DEBUG_H
#define _HAVE_DEBUG_H
#include <errno.h>
#include "types.h"
#include "config.h"
/*******************
* Terminal colors *
*******************/
#ifdef USE_COLOR
# define cBLK "\x1b[0;30m"
# define cRED "\x1b[0;31m"
# define cGRN "\x1b[0;32m"
# define cBRN "\x1b[0;33m"
# define cBLU "\x1b[0;34m"
# define cMGN "\x1b[0;35m"
# define cCYA "\x1b[0;36m"
# define cLGR "\x1b[0;37m"
# define cGRA "\x1b[1;90m"
# define cLRD "\x1b[1;91m"
# define cLGN "\x1b[1;92m"
# define cYEL "\x1b[1;93m"
# define cLBL "\x1b[1;94m"
# define cPIN "\x1b[1;95m"
# define cLCY "\x1b[1;96m"
# define cBRI "\x1b[1;97m"
# define cRST "\x1b[0m"
# define bgBLK "\x1b[40m"
# define bgRED "\x1b[41m"
# define bgGRN "\x1b[42m"
# define bgBRN "\x1b[43m"
# define bgBLU "\x1b[44m"
# define bgMGN "\x1b[45m"
# define bgCYA "\x1b[46m"
# define bgLGR "\x1b[47m"
# define bgGRA "\x1b[100m"
# define bgLRD "\x1b[101m"
# define bgLGN "\x1b[102m"
# define bgYEL "\x1b[103m"
# define bgLBL "\x1b[104m"
# define bgPIN "\x1b[105m"
# define bgLCY "\x1b[106m"
# define bgBRI "\x1b[107m"
#else
# define cBLK ""
# define cRED ""
# define cGRN ""
# define cBRN ""
# define cBLU ""
# define cMGN ""
# define cCYA ""
# define cLGR ""
# define cGRA ""
# define cLRD ""
# define cLGN ""
# define cYEL ""
# define cLBL ""
# define cPIN ""
# define cLCY ""
# define cBRI ""
# define cRST ""
# define bgBLK ""
# define bgRED ""
# define bgGRN ""
# define bgBRN ""
# define bgBLU ""
# define bgMGN ""
# define bgCYA ""
# define bgLGR ""
# define bgGRA ""
# define bgLRD ""
# define bgLGN ""
# define bgYEL ""
# define bgLBL ""
# define bgPIN ""
# define bgLCY ""
# define bgBRI ""
#endif /* ^USE_COLOR */
/*************************
* Box drawing sequences *
*************************/
#ifdef FANCY_BOXES
# define SET_G1 "\x1b)0" /* Set G1 for box drawing */
# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */
# define bSTART "\x0e" /* Enter G1 drawing mode */
# define bSTOP "\x0f" /* Leave G1 drawing mode */
# define bH "q" /* Horizontal line */
# define bV "x" /* Vertical line */
# define bLT "l" /* Left top corner */
# define bRT "k" /* Right top corner */
# define bLB "m" /* Left bottom corner */
# define bRB "j" /* Right bottom corner */
# define bX "n" /* Cross */
# define bVR "t" /* Vertical, branch right */
# define bVL "u" /* Vertical, branch left */
# define bHT "v" /* Horizontal, branch top */
# define bHB "w" /* Horizontal, branch bottom */
#else
# define SET_G1 ""
# define RESET_G1 ""
# define bSTART ""
# define bSTOP ""
# define bH "-"
# define bV "|"
# define bLT "+"
# define bRT "+"
# define bLB "+"
# define bRB "+"
# define bX "+"
# define bVR "+"
# define bVL "+"
# define bHT "+"
# define bHB "+"
#endif /* ^FANCY_BOXES */
/***********************
* Misc terminal codes *
***********************/
#define TERM_HOME "\x1b[H"
#define TERM_CLEAR TERM_HOME "\x1b[2J"
#define cEOL "\x1b[0K"
#define CURSOR_HIDE "\x1b[?25l"
#define CURSOR_SHOW "\x1b[?25h"
/************************
* Debug & error macros *
************************/
/* Just print stuff to the appropriate stream. */
#ifdef MESSAGES_TO_STDOUT
# define SAYF(x...) printf(x)
#else
# define SAYF(x...) fprintf(stderr, x)
#endif /* ^MESSAGES_TO_STDOUT */
/* Show a prefixed warning. */
#define WARNF(x...) do { \
SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Show a prefixed "doing something" message. */
#define ACTF(x...) do { \
SAYF(cLBL "[*] " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Show a prefixed "success" message. */
#define OKF(x...) do { \
SAYF(cLGN "[+] " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Show a prefixed fatal error message (not used in afl). */
#define BADF(x...) do { \
SAYF(cLRD "\n[-] " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Die with a verbose non-OS fatal error message. */
#define FATAL(x...) do { \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
cBRI x); \
SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \
__FUNCTION__, __FILE__, __LINE__); \
exit(1); \
} while (0)
/* Die by calling abort() to provide a core dump. */
#define ABORT(x...) do { \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
cBRI x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \
__FUNCTION__, __FILE__, __LINE__); \
abort(); \
} while (0)
/* Die while also including the output of perror(). */
#define PFATAL(x...) do { \
fflush(stdout); \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \
cBRI x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \
__FUNCTION__, __FILE__, __LINE__); \
SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \
exit(1); \
} while (0)
/* Die with FATAL() or PFATAL() depending on the value of res (used to
interpret different failure modes for read(), write(), etc). */
#define RPFATAL(res, x...) do { \
if (res < 0) PFATAL(x); else FATAL(x); \
} while (0)
/* Error-checking versions of read() and write() that call RPFATAL() as
appropriate. */
#define ck_write(fd, buf, len, fn) do { \
u32 _len = (len); \
s32 _res = write(fd, buf, _len); \
if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
} while (0)
#define ck_read(fd, buf, len, fn) do { \
u32 _len = (len); \
s32 _res = read(fd, buf, _len); \
if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
} while (0)
#endif /* ! _HAVE_DEBUG_H */
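
A short usage sketch for these macros follows. The function name read_seed_example and its arguments are made up for illustration; the macros themselves are used the way AFL's own tools use them.
#include <fcntl.h>
#include <unistd.h>
#include "debug.h"                      /* also pulls in types.h and config.h */
static void read_seed_example(const char *path, unsigned char *buf, unsigned int len) {
  ACTF("Reading %u bytes from '%s'...", len, path);
  int fd = open(path, O_RDONLY);
  if (fd < 0) PFATAL("Unable to open '%s'", path);  /* appends strerror(errno) */
  ck_read(fd, buf, len, path);                      /* short read aborts via RPFATAL() */
  close(fd);
  OKF("Seed loaded.");
}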

@ -1,202 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

[Image assets changed in this diff; before/after viewers omitted. Sizes (unchanged): 581 KiB, 892 B, 1.7 KiB, 38 B, 179 B, 642 B, 595 B, 876 B, 293 B, 434 B, 996 B.]

Some files were not shown because too many files have changed in this diff.
