
pull/9/head
zhongfengrong 8 months ago
parent c6ecd5d6b6
commit f823fe6219

@@ -1,89 +1,91 @@
#!/usr/bin/env bash
#
# american fuzzy lop - corpus minimization tool
# American Fuzzy Lop - 语料库最小化工具
# ---------------------------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
# 作者和维护者:Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2014, 2015 Google LLC All rights reserved.
# 版权所有 2014, 2015 Google LLC 保留所有权利。
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# 根据 Apache 许可证 2.0 版("许可证")授权;
# 除非符合许可证的规定,否则您不得使用此文件。
# 您可以从以下网址获取许可证的副本:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This tool tries to find the smallest subset of files in the input directory
# that still trigger the full range of instrumentation data points seen in
# the starting corpus. This has two uses:
# 此工具尝试查找输入目录中最小的文件子集,
# 该子集仍然能触发初始语料库中出现的全部插桩数据点。
# 这有两个用途:
#
# - Screening large corpora of input files before using them as a seed for
# afl-fuzz. The tool will remove functionally redundant files and likely
# leave you with a much smaller set.
# - 在将大型输入文件语料库用作 afl-fuzz 的种子之前对其进行筛选。
#   该工具会删除功能上冗余的文件,
#   很可能给你留下一个小得多的集合。
#
# (In this case, you probably also want to consider running afl-tmin on
# the individual files later on to reduce their size.)
# (在这种情况下,您可能还想考虑稍后对
# 各个文件运行 afl-tmin 以减少其大小。)
#
# - Minimizing the corpus generated organically by afl-fuzz, perhaps when
# planning to feed it to more resource-intensive tools. The tool achieves
# this by removing all entries that used to trigger unique behaviors in the
# past, but have been made obsolete by later finds.
# - 最小化由 afl-fuzz 自然生成的语料库,
#   例如在打算把它输入给资源消耗更大的工具之前。
#   该工具通过删除那些过去曾触发独特行为、
#   但已被后来的发现所取代的条目来实现这一点。
#
# Note that the tool doesn't modify the files themselves. For that, you want
# afl-tmin.
# 请注意,该工具不会修改文件本身。
# 如需修改文件本身,请使用 afl-tmin。
#
# This script must use bash because other shells may have hardcoded limits on
# array sizes.
# 此脚本必须使用 bash因为其他 shell 可能对
# 数组大小有硬编码限制。
#
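# A hedged usage sketch (the "./target" name and directory names below are
# hypothetical): assuming an instrumented target that reads the file
# substituted for @@, a typical invocation is
#
#   ./afl-cmin -i seeds/ -o seeds_min/ -- ./target @@
#
# which leaves the minimized corpus in seeds_min/.
#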
echo "corpus minimization tool for afl-fuzz by <lcamtuf@google.com>"
echo "为 afl-fuzz 提供的语料库最小化工具 <lcamtuf@google.com>" # 输出程序名称
echo
#########
# SETUP #
# 设置 #
#########
# Process command-line options...
# 处理命令行选项...
MEM_LIMIT=100
TIMEOUT=none
MEM_LIMIT=100 # 内存限制初始值为 100 MB
TIMEOUT=none # 超时初始值为无
# 取消设置以下变量
unset IN_DIR OUT_DIR STDIN_FILE EXTRA_PAR MEM_LIMIT_GIVEN \
AFL_CMIN_CRASHES_ONLY AFL_CMIN_ALLOW_ANY QEMU_MODE
# 解析命令行选项
while getopts "+i:o:f:m:t:eQC" opt; do
case "$opt" in
"i")
"i") # 输入目录选项
IN_DIR="$OPTARG"
;;
"o")
"o") # 输出目录选项
OUT_DIR="$OPTARG"
;;
"f")
"f") # 从中读取的模糊程序位置(标准输入)
STDIN_FILE="$OPTARG"
;;
"m")
"m") # 内存限制
MEM_LIMIT="$OPTARG"
MEM_LIMIT_GIVEN=1
;;
"t")
"t") # 超时时间
TIMEOUT="$OPTARG"
;;
"e")
"e") # 额外参数
EXTRA_PAR="$EXTRA_PAR -e"
;;
"C")
"C") # 仅保留崩溃输入
export AFL_CMIN_CRASHES_ONLY=1
;;
"Q")
"Q") # 使用仅二进制的仪器QEMU 模式)
EXTRA_PAR="$EXTRA_PAR -Q"
test "$MEM_LIMIT_GIVEN" = "" && MEM_LIMIT=250
QEMU_MODE=1
;;
"?")
"?") # 无效选项
exit 1
;;
@@ -91,84 +93,84 @@ while getopts "+i:o:f:m:t:eQC" opt; do
done
shift $((OPTIND-1))
shift $((OPTIND-1)) # 移动位置参数
TARGET_BIN="$1"
TARGET_BIN="$1" # 目标二进制文件
# 检查必需参数是否缺失
if [ "$TARGET_BIN" = "" -o "$IN_DIR" = "" -o "$OUT_DIR" = "" ]; then
# 输出用法信息到标准错误
cat 1>&2 <<_EOF_
Usage: $0 [ options ] -- /path/to/target_app [ ... ]
使用: $0 [选项] -- /path/to/target_app [ ... ]
Required parameters:
所需参数:
-i dir - input directory with the starting corpus
-o dir - output directory for minimized files
-i dir - 包含起始语料库的输入目录
-o dir - 最小化文件的输出目录
Execution control settings:
执行控制设置:
-f file - location read by the fuzzed program (stdin)
-m megs - memory limit for child process ($MEM_LIMIT MB)
-t msec - run time limit for child process (none)
-Q - use binary-only instrumentation (QEMU mode)
-f file - 被测程序读取输入的文件位置(标准输入)
-m megs - 子进程的内存限制($MEM_LIMIT MB
-t msec - 子进程的运行时间限制(无)
-Q - 使用仅二进制插桩(QEMU 模式)
Minimization settings:
最小化设置:
-C - keep crashing inputs, reject everything else
-e - solve for edge coverage only, ignore hit counts
-C - 保留崩溃输入,拒绝其他所有内容
-e - 仅考虑边覆盖,忽略命中次数
For additional tips, please consult docs/README.
有关其他提示,请参阅 docs/README。
_EOF_
exit 1
fi
# Do a sanity check to discourage the use of /tmp, since we can't really
# handle this safely from a shell script.
# 进行健全性检查,阻止使用 /tmp,因为在 shell 脚本中我们无法真正安全地处理这种情况。
if [ "$AFL_ALLOW_TMP" = "" ]; then
echo "$IN_DIR" | grep -qE '^(/var)?/tmp/'
echo "$IN_DIR" | grep -qE '^(/var)?/tmp/' # 检查输入目录是否在/tmp
T1="$?"
echo "$TARGET_BIN" | grep -qE '^(/var)?/tmp/'
echo "$TARGET_BIN" | grep -qE '^(/var)?/tmp/' # 检查目标二进制文件是否在/tmp
T2="$?"
echo "$OUT_DIR" | grep -qE '^(/var)?/tmp/'
echo "$OUT_DIR" | grep -qE '^(/var)?/tmp/' # 检查输出目录是否在/tmp
T3="$?"
echo "$STDIN_FILE" | grep -qE '^(/var)?/tmp/'
echo "$STDIN_FILE" | grep -qE '^(/var)?/tmp/' # 检查标准输入文件是否在/tmp
T4="$?"
echo "$PWD" | grep -qE '^(/var)?/tmp/'
echo "$PWD" | grep -qE '^(/var)?/tmp/' # 检查当前工作目录是否在/tmp
T5="$?"
if [ "$T1" = "0" -o "$T2" = "0" -o "$T3" = "0" -o "$T4" = "0" -o "$T5" = "0" ]; then
echo "[-] Error: do not use this script in /tmp or /var/tmp." 1>&2
echo "[-] 错误: 请勿在 /tmp 或 /var/tmp 中使用此脚本。" 1>&2
exit 1
fi
fi
# If @@ is specified, but there's no -f, let's come up with a temporary input
# file name.
# 如果指定了 @@,但没有 -f创建一个临时输入文件名。
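# For example (illustrative; "./target" is a hypothetical name): with a
# command line ending in "-- ./target @@" and no -f option, STDIN_FILE is
# pointed at "$OUT_DIR/.traces/.cur_input" below, and each test case is
# copied to that file before the target is executed.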
TRACE_DIR="$OUT_DIR/.traces"
if [ "$STDIN_FILE" = "" ]; then
if echo "$*" | grep -qF '@@'; then
STDIN_FILE="$TRACE_DIR/.cur_input"
STDIN_FILE="$TRACE_DIR/.cur_input" # 使用当前输入文件名
fi
fi
# Check for obvious errors.
# 检查明显的错误。
if [ ! "$MEM_LIMIT" = "none" ]; then
if [ "$MEM_LIMIT" -lt "5" ]; then
echo "[-] Error: dangerously low memory limit." 1>&2
echo "[-] 错误: 内存限制过低。" 1>&2
exit 1
fi
@@ -177,7 +179,7 @@ fi
if [ ! "$TIMEOUT" = "none" ]; then
if [ "$TIMEOUT" -lt "10" ]; then
echo "[-] Error: dangerously low timeout." 1>&2
echo "[-] 错误: 超时过低。" 1>&2
exit 1
fi
@@ -185,92 +187,91 @@ fi
if [ ! -f "$TARGET_BIN" -o ! -x "$TARGET_BIN" ]; then
TNEW="`which "$TARGET_BIN" 2>/dev/null`"
TNEW="`which "$TARGET_BIN" 2>/dev/null`" # 查找目标二进制文件的路径
if [ ! -f "$TNEW" -o ! -x "$TNEW" ]; then
echo "[-] Error: binary '$TARGET_BIN' not found or not executable." 1>&2
echo "[-] 错误: 未找到或不可执行的二进制文件 '$TARGET_BIN'。" 1>&2
exit 1
fi
TARGET_BIN="$TNEW"
TARGET_BIN="$TNEW" # 更新目标二进制文件路径
fi
if [ "$AFL_SKIP_BIN_CHECK" = "" -a "$QEMU_MODE" = "" ]; then
if ! grep -qF "__AFL_SHM_ID" "$TARGET_BIN"; then
echo "[-] Error: binary '$TARGET_BIN' doesn't appear to be instrumented." 1>&2
echo "[-] 错误: 二进制文件 '$TARGET_BIN' 似乎没有被仪器化。" 1>&2
exit 1
fi
fi
if [ ! -d "$IN_DIR" ]; then
echo "[-] Error: directory '$IN_DIR' not found." 1>&2
echo "[-] 错误: 目录 '$IN_DIR' 未找到。" 1>&2
exit 1
fi
test -d "$IN_DIR/queue" && IN_DIR="$IN_DIR/queue"
test -d "$IN_DIR/queue" && IN_DIR="$IN_DIR/queue" # 如果存在队列目录,则更新输入目录
find "$OUT_DIR" -name 'id[:_]*' -maxdepth 1 -exec rm -- {} \; 2>/dev/null
rm -rf "$TRACE_DIR" 2>/dev/null
find "$OUT_DIR" -name 'id[:_]*' -maxdepth 1 -exec rm -- {} \; 2>/dev/null # 删除输出目录中的旧轨迹
rm -rf "$TRACE_DIR" 2>/dev/null # 删除临时轨迹目录
rmdir "$OUT_DIR" 2>/dev/null
rmdir "$OUT_DIR" 2>/dev/null # 删除输出目录
if [ -d "$OUT_DIR" ]; then
echo "[-] Error: directory '$OUT_DIR' exists and is not empty - delete it first." 1>&2
echo "[-] 错误: 目录 '$OUT_DIR' 已存在且非空 - 请先删除它。" 1>&2
exit 1
fi
mkdir -m 700 -p "$TRACE_DIR" || exit 1
mkdir -m 700 -p "$TRACE_DIR" || exit 1 # 创建临时轨迹目录并设置权限
if [ ! "$STDIN_FILE" = "" ]; then
rm -f "$STDIN_FILE" || exit 1
touch "$STDIN_FILE" || exit 1
rm -f "$STDIN_FILE" || exit 1 # 删除旧的标准输入文件
touch "$STDIN_FILE" || exit 1 # 创建新的标准输入文件
fi
if [ "$AFL_PATH" = "" ]; then
SHOWMAP="${0%/afl-cmin}/afl-showmap"
SHOWMAP="${0%/afl-cmin}/afl-showmap" # 设置 afl-showmap 的路径
else
SHOWMAP="$AFL_PATH/afl-showmap"
SHOWMAP="$AFL_PATH/afl-showmap" # 使用 AFL_PATH 中指定的路径
fi
if [ ! -x "$SHOWMAP" ]; then
echo "[-] Error: can't find 'afl-showmap' - please set AFL_PATH." 1>&2
rm -rf "$TRACE_DIR"
echo "[-] 错误: 找不到 'afl-showmap' - 请设置 AFL_PATH。" 1>&2
rm -rf "$TRACE_DIR" # 删除临时轨迹目录
exit 1
fi
IN_COUNT=$((`ls -- "$IN_DIR" 2>/dev/null | wc -l`))
IN_COUNT=$((`ls -- "$IN_DIR" 2>/dev/null | wc -l`)) # 计算输入目录中的文件数量
if [ "$IN_COUNT" = "0" ]; then
echo "[+] Hmm, no inputs in the target directory. Nothing to be done."
rm -rf "$TRACE_DIR"
echo "[+] 嗯,目标目录中没有输入。无需处理。"
rm -rf "$TRACE_DIR" # 删除临时轨迹目录
exit 1
fi
FIRST_FILE=`ls "$IN_DIR" | head -1`
FIRST_FILE=`ls "$IN_DIR" | head -1` # 获取第一个文件名
# Make sure that we're not dealing with a directory.
# 确保不是处理目录。
if [ -d "$IN_DIR/$FIRST_FILE" ]; then
echo "[-] Error: The target directory contains subdirectories - please fix." 1>&2
rm -rf "$TRACE_DIR"
echo "[-] 错误: 目标目录包含子目录 - 请修复。" 1>&2
rm -rf "$TRACE_DIR" # 删除临时轨迹目录
exit 1
fi
# Check for the more efficient way to copy files...
# 检查复制文件的更有效方法...
if ln "$IN_DIR/$FIRST_FILE" "$TRACE_DIR/.link_test" 2>/dev/null; then
CP_TOOL=ln
CP_TOOL=ln # 如果可以链接,则设置复制工具为 ln
else
CP_TOOL=cp
CP_TOOL=cp # 否则使用 cp
fi
# Make sure that we can actually get anything out of afl-showmap before we
# waste too much time.
# 在浪费太多时间之前,先确保我们确实能从 afl-showmap 得到输出。
echo "[*] Testing the target binary..."
echo "[*] 测试目标二进制文件..."
if [ "$STDIN_FILE" = "" ]; then
@@ -278,43 +279,42 @@ if [ "$STDIN_FILE" = "" ]; then
else
cp "$IN_DIR/$FIRST_FILE" "$STDIN_FILE"
cp "$IN_DIR/$FIRST_FILE" "$STDIN_FILE" # 复制第一个文件到标准输入文件
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
fi
FIRST_COUNT=$((`grep -c . "$TRACE_DIR/.run_test"`))
FIRST_COUNT=$((`grep -c . "$TRACE_DIR/.run_test"`)) # 计算运行测试的输出
if [ "$FIRST_COUNT" -gt "0" ]; then
echo "[+] OK, $FIRST_COUNT tuples recorded."
echo "[+] 好的,记录了 $FIRST_COUNT 个元组。"
else
echo "[-] Error: no instrumentation output detected (perhaps crash or timeout)." 1>&2
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
echo "[-] 错误: 未检测到仪器输出(可能崩溃或超时)。" 1>&2
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR" # 删除临时轨迹
exit 1
fi
# Let's roll!
# 开始工作!
#############################
# STEP 1: COLLECTING TRACES #
# 步骤 1收集轨迹 #
#############################
echo "[*] Obtaining traces for input files in '$IN_DIR'..."
echo "[*] 获取 '$IN_DIR' 中输入文件的轨迹..."
(
CUR=0
CUR=0 # 当前文件计数器
if [ "$STDIN_FILE" = "" ]; then
while read -r fn; do
while read -r fn; do # 逐行读取输入文件名
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
printf "\\r 正在处理文件 $CUR/$IN_COUNT... " # 输出当前进度
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$fn"
@@ -322,18 +322,16 @@ echo "[*] Obtaining traces for input files in '$IN_DIR'..."
else
while read -r fn; do
while read -r fn; do # 逐行读取输入文件名
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
printf "\\r 正在处理文件 $CUR/$IN_COUNT... " # 输出当前进度
cp "$IN_DIR/$fn" "$STDIN_FILE"
cp "$IN_DIR/$fn" "$STDIN_FILE" # 复制文件到标准输入文件
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
done < <(ls "$IN_DIR")
fi
)
@@ -341,121 +339,117 @@ echo "[*] Obtaining traces for input files in '$IN_DIR'..."
echo
##########################
# STEP 2: SORTING TUPLES #
# 步骤 2排序元组 #
##########################
# With this out of the way, we sort all tuples by popularity across all
# datasets. The reasoning here is that we won't be able to avoid the files
# that trigger unique tuples anyway, so we will want to start with them and
# see what's left.
# 完成这一步后,我们按照在所有数据集中的流行度对全部元组进行排序。
# 这样做的理由是,我们反正无法避开那些触发唯一元组的文件,
# 所以不如从它们开始,再看看还剩下什么。
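# Illustratively, .all_uniq then holds one line per unique tuple, prefixed by
# the number of trace files containing it (from "uniq -c") and sorted so the
# rarest tuples come first, roughly like (placeholder tuple IDs):
#
#       1 <tuple_A>
#       3 <tuple_B>
#     512 <tuple_C>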
echo "[*] Sorting trace sets (this may take a while)..."
echo "[*] 排序轨迹集(这可能需要一段时间)..."
ls "$IN_DIR" | sed "s#^#$TRACE_DIR/#" | tr '\n' '\0' | xargs -0 -n 1 cat | \
sort | uniq -c | sort -n >"$TRACE_DIR/.all_uniq"
sort | uniq -c | sort -n >"$TRACE_DIR/.all_uniq" # 获取所有唯一元组
TUPLE_COUNT=$((`grep -c . "$TRACE_DIR/.all_uniq"`))
TUPLE_COUNT=$((`grep -c . "$TRACE_DIR/.all_uniq"`)) # 计算唯一元组数量
echo "[+] Found $TUPLE_COUNT unique tuples across $IN_COUNT files."
echo "[+] 找到 $TUPLE_COUNT 个唯一元组,遍历了 $IN_COUNT 个文件。"
#####################################
# STEP 3: SELECTING CANDIDATE FILES #
# 步骤 3选择候选文件 #
#####################################
# The next step is to find the best candidate for each tuple. The "best"
# part is understood simply as the smallest input that includes a particular
# tuple in its trace. Empirical evidence suggests that this produces smaller
# datasets than more involved algorithms that could be still pulled off in
# a shell script.
# 下一步是为每个元组找出最佳候选文件。这里的"最佳"
# 简单地理解为其轨迹中包含该元组的最小输入文件。
# 经验表明,与其他仍能在 shell 脚本中实现的更复杂算法相比,
# 这种做法得到的数据集更小。
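# Concretely, the loop below appends the file name to every tuple line of that
# file's trace, so .candidate_list accumulates "<tuple> <file name>" pairs,
# with the smallest files written first thanks to "ls -rS". A sketch with
# hypothetical names:
#
#   <tuple_A> id:000003,orig:tiny_seed
#   <tuple_A> id:000042,orig:big_seed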
echo "[*] Finding best candidates for each tuple..."
echo "[*] 寻找每个元组的最佳候选者..."
CUR=0
while read -r fn; do
while read -r fn; do # 逐行读取输入文件名
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
printf "\\r 正在处理文件 $CUR/$IN_COUNT... " # 输出当前进度
sed "s#\$# $fn#" "$TRACE_DIR/$fn" >>"$TRACE_DIR/.candidate_list"
sed "s#\$# $fn#" "$TRACE_DIR/$fn" >>"$TRACE_DIR/.candidate_list" # 将元组与文件名关联
done < <(ls -rS "$IN_DIR")
done < <(ls -rS "$IN_DIR") # 按文件大小从小到大列出文件名
echo
##############################
# STEP 4: LOADING CANDIDATES #
# 步骤 4加载候选 #
##############################
# At this point, we have a file of tuple-file pairs, sorted by file size
# in ascending order (as a consequence of ls -rS). By doing sort keyed
# only by tuple (-k 1,1) and configured to output only the first line for
# every key (-s -u), we end up with the smallest file for each tuple.
# 此时,我们得到一个元组-文件对的列表,按文件大小升序排列
# (这是 ls -rS 的结果)。通过只以元组作为排序键 (-k 1,1),
# 并配置为每个键只输出第一行 (-s -u),
# 我们最终为每个元组得到最小的那个文件。
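# Illustrative transformation (hypothetical tuple and file names): a surviving
# candidate line such as
#
#   <tuple_A> id:000003,orig:tiny_seed
#
# is rewritten by the sed below into
#
#   BEST_FILE[<tuple_A>]="id:000003,orig:tiny_seed"
#
# while "sort -k1,1 -s -u" has already discarded the larger files that also
# contained <tuple_A>.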
echo "[*] Sorting candidate list (be patient)..."
echo "[*] 排序候选列表(耐心等候)..."
sort -k1,1 -s -u "$TRACE_DIR/.candidate_list" | \
sed 's/^/BEST_FILE[/;s/ /]="/;s/$/"/' >"$TRACE_DIR/.candidate_script"
sed 's/^/BEST_FILE[/;s/ /]="/;s/$/"/' >"$TRACE_DIR/.candidate_script" # 创建候选脚本
if [ ! -s "$TRACE_DIR/.candidate_script" ]; then
echo "[-] Error: no traces obtained from test cases, check syntax!" 1>&2
echo "[-] 错误: 从测试用例中未获得轨迹,请检查语法!" 1>&2
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 1
fi
# The sed command converted the sorted list to a shell script that populates
# BEST_FILE[tuple]="fname". Let's load that!
# sed 命令将排序后的列表转换为一个填充
# BEST_FILE[tuple]="fname" 的 shell 脚本。让我们加载它!
. "$TRACE_DIR/.candidate_script"
. "$TRACE_DIR/.candidate_script" # 执行候选脚本
##########################
# STEP 5: WRITING OUTPUT #
# 步骤 5写出输出 #
##########################
# The final trick is to grab the top pick for each tuple, unless said tuple is
# already set due to the inclusion of an earlier candidate; and then put all
# tuples associated with the newly-added file to the "already have" list. The
# loop works from least popular tuples and toward the most common ones.
# 最后一步是获取每个元组的最佳选择,除非由于包含
# 早期候选者而已经设置了该元组;然后将所有
# 与新添加文件关联的元组放入“已拥有”列表。
# 循环从最不常见的元组开始,到最常见的元组结束。
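# A tiny worked example (hypothetical names): if seed1 is the best file for
# both tuple_A and tuple_B, then copying seed1 when tuple_A is reached also
# puts tuple_B into .already_have, so tuple_B is skipped later and seed1 is
# only copied once.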
echo "[*] Processing candidates and writing output files..."
echo "[*] 处理候选并写入输出文件..."
CUR=0
touch "$TRACE_DIR/.already_have"
while read -r cnt tuple; do
touch "$TRACE_DIR/.already_have" # 创建已拥有文件
while read -r cnt tuple; do # 逐行读取元组和计数
CUR=$((CUR+1))
printf "\\r Processing tuple $CUR/$TUPLE_COUNT... "
printf "\\r 正在处理元组 $CUR/$TUPLE_COUNT... " # 输出当前进度
# If we already have this tuple, skip it.
# 如果我们已经拥有此元组,跳过它。
grep -q "^$tuple\$" "$TRACE_DIR/.already_have" && continue
grep -q "^$tuple\$" "$TRACE_DIR/.already_have" && continue # 检查已拥有列表
FN=${BEST_FILE[tuple]}
FN=${BEST_FILE[tuple]} # 获取最佳候选文件名
$CP_TOOL "$IN_DIR/$FN" "$OUT_DIR/$FN"
$CP_TOOL "$IN_DIR/$FN" "$OUT_DIR/$FN" # 复制文件到输出目录
if [ "$((CUR % 5))" = "0" ]; then
sort -u "$TRACE_DIR/$FN" "$TRACE_DIR/.already_have" >"$TRACE_DIR/.tmp"
mv -f "$TRACE_DIR/.tmp" "$TRACE_DIR/.already_have"
if [ "$((CUR % 5))" = "0" ]; then # 每处理五个元组时进行一次排序
sort -u "$TRACE_DIR/$FN" "$TRACE_DIR/.already_have" >"$TRACE_DIR/.tmp" # 合并并去重
mv -f "$TRACE_DIR/.tmp" "$TRACE_DIR/.already_have" # 替换原有的已拥有文件
else
cat "$TRACE_DIR/$FN" >>"$TRACE_DIR/.already_have"
cat "$TRACE_DIR/$FN" >>"$TRACE_DIR/.already_have" # 否则直接追加到已拥有文件
fi
done <"$TRACE_DIR/.all_uniq"
done <"$TRACE_DIR/.all_uniq" # 从所有唯一元组读取数据
echo
OUT_COUNT=`ls -- "$OUT_DIR" | wc -l`
OUT_COUNT=`ls -- "$OUT_DIR" | wc -l` # 统计输出目录中的文件数量
if [ "$OUT_COUNT" = "1" ]; then
echo "[!] WARNING: All test cases had the same traces, check syntax!"
echo "[!] 警告: 所有测试用例具有相同的轨迹,请检查语法!"
fi
echo "[+] Narrowed down to $OUT_COUNT files, saved in '$OUT_DIR'."
echo "[+] 已缩小到 $OUT_COUNT 个文件,保存在 '$OUT_DIR' 中。" # 输出结果数量
echo
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR" # 删除临时轨迹目录(如果需要)
exit 0
exit 0 # 正常退出脚本

@@ -1,170 +1,175 @@
#!/bin/sh
#
# american fuzzy lop - Advanced Persistent Graphing
# American Fuzzy Lop - 高级持久图形化工具
# -------------------------------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
# Based on a design & prototype by Michael Rash.
# 作者和维护者:Michal Zalewski <lcamtuf@google.com>
# 基于 Michael Rash 的设计和原型。
#
# Copyright 2014, 2015 Google LLC All rights reserved.
# 版权所有 2014, 2015 Google LLC 保留所有权利。
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# 根据 Apache 许可证,版本 2.0"许可证")授权;
# 除非遵循许可证,否则您不能使用此文件。
# 您可以从以下网址获取许可证的副本:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
echo "progress plotting utility for afl-fuzz by <lcamtuf@google.com>"
echo "为 afl-fuzz 提供的进度绘图工具 <lcamtuf@google.com>" # 输出程序名称
echo
# 检查参数数量是否为 2
if [ ! "$#" = "2" ]; then
# 输出使用说明到标准错误
cat 1>&2 <<_EOF_
This program generates gnuplot images from afl-fuzz output data. Usage:
此程序从 afl-fuzz 输出数据生成 gnuplot 图像。用法:
$0 afl_state_dir graph_output_dir
The afl_state_dir parameter should point to an existing state directory for any
active or stopped instance of afl-fuzz; while graph_output_dir should point to
an empty directory where this tool can write the resulting plots to.
参数 afl_state_dir 应指向现有的状态目录,该目录属于任何
正在运行或已停止的 afl-fuzz 实例;而 graph_output_dir 应指向
一个空目录,在该目录中此工具可以写入结果图表。
The program will put index.html and three PNG images in the output directory;
you should be able to view it with any web browser of your choice.
该程序将在输出目录中放置 index.html 和三张 PNG 图像;
您应该能够用任何您喜欢的 web 浏览器查看它。
_EOF_
exit 1
exit 1 # 退出程序,返回错误状态
fi
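# A hedged invocation sketch (both directory names are hypothetical):
#
#   ./afl-plot sync_dir/fuzzer01 /var/www/html/afl-plot-out
#
# reads sync_dir/fuzzer01/plot_data and writes index.html plus the three PNG
# files into the second directory.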
# 如果 AFL_ALLOW_TMP 变量为空,则进行临时目录检查
if [ "$AFL_ALLOW_TMP" = "" ]; then
echo "$1" | grep -qE '^(/var)?/tmp/'
echo "$1" | grep -qE '^(/var)?/tmp/' # 检查第一个参数是否在/tmp
T1="$?"
echo "$2" | grep -qE '^(/var)?/tmp/'
echo "$2" | grep -qE '^(/var)?/tmp/' # 检查第二个参数是否在/tmp
T2="$?"
if [ "$T1" = "0" -o "$T2" = "0" ]; then
echo "[-] Error: this script shouldn't be used with shared /tmp directories." 1>&2
exit 1
echo "[-] 错误: 不应在共享 /tmp 目录中使用此脚本。" 1>&2 # 输出错误信息
exit 1 # 退出程序,返回错误状态
fi
fi
# 检查输入目录是否有效(必须存在 'plot_data' 文件)
if [ ! -f "$1/plot_data" ]; then
echo "[-] Error: input directory is not valid (missing 'plot_data')." 1>&2
exit 1
echo "[-] 错误: 输入目录无效(缺少 'plot_data' 文件)。" 1>&2 # 输出错误信息
exit 1 # 退出程序,返回错误状态
fi
# 从 fuzzer_stats 文件中提取 banner 信息
BANNER="`cat "$1/fuzzer_stats" | grep '^afl_banner ' | cut -d: -f2- | cut -b2-`"
test "$BANNER" = "" && BANNER="(none)" # 如果未找到 banner设置为 (none)
test "$BANNER" = "" && BANNER="(none)"
# 查找 gnuplot 命令
GNUPLOT=`which gnuplot 2>/dev/null`
# 检查是否能找到 gnuplot
if [ "$GNUPLOT" = "" ]; then
echo "[-] Error: can't find 'gnuplot' in your \$PATH." 1>&2
exit 1
echo "[-] 错误: 在您的 \$PATH 中找不到 'gnuplot'。" 1>&2 # 输出错误信息
exit 1 # 退出程序,返回错误状态
fi
# 创建输出目录,如果目录已经存在则忽略错误
mkdir "$2" 2>/dev/null
# 检查输出目录是否成功创建
if [ ! -d "$2" ]; then
echo "[-] Error: unable to create the output directory - pick another location." 1>&2
exit 1
echo "[-] 错误: 无法创建输出目录 - 请选择另一个位置。" 1>&2 # 输出错误信息
exit 1 # 退出程序,返回错误状态
fi
# 删除旧的图像文件
rm -f "$2/high_freq.png" "$2/low_freq.png" "$2/exec_speed.png"
mv -f "$2/index.html" "$2/index.html.orig" 2>/dev/null
mv -f "$2/index.html" "$2/index.html.orig" 2>/dev/null # 备份旧的 index.html 文件
echo "[*] Generating plots..."
echo "[*] 生成图表..." # 输出生成图表的提示
(
# gnuplot 脚本开始
cat <<_EOF_
set terminal png truecolor enhanced size 1000,300 butt
set terminal png truecolor enhanced size 1000,300 butt # 设置输出为 PNG 格式、真彩色并指定尺寸
set output '$2/high_freq.png'
set output '$2/high_freq.png' # 设置输出文件为高频图像文件
set xdata time
set timefmt '%s'
set format x "%b %d\n%H:%M"
set tics font 'small'
unset mxtics
unset mytics
set xdata time # 设置 x 轴数据为时间
set timefmt '%s' # 设置时间格式
set format x "%b %d\n%H:%M" # 设置 x 轴刻度格式
set tics font 'small' # 设置刻度字体
unset mxtics # 禁用 x 轴的次刻度
unset mytics # 禁用 y 轴的次刻度
set grid xtics linetype 0 linecolor rgb '#e0e0e0'
set grid ytics linetype 0 linecolor rgb '#e0e0e0'
set border linecolor rgb '#50c0f0'
set tics textcolor rgb '#000000'
set key outside
set grid xtics linetype 0 linecolor rgb '#e0e0e0' # 设置 x 轴网格线样式和颜色
set grid ytics linetype 0 linecolor rgb '#e0e0e0' # 设置 y 轴网格线样式和颜色
set border linecolor rgb '#50c0f0' # 设置边框颜色
set tics textcolor rgb '#000000' # 设置刻度文本颜色
set key outside # 设置图例位置在外部
set autoscale xfixmin
set autoscale xfixmax
set autoscale xfixmin # 自动缩放 x 轴最小值
set autoscale xfixmax # 自动缩放 x 轴最大值
# 绘制高频图像
plot '$1/plot_data' using 1:4 with filledcurve x1 title 'total paths' linecolor rgb '#000000' fillstyle transparent solid 0.2 noborder, \\
'' using 1:3 with filledcurve x1 title 'current path' linecolor rgb '#f0f0f0' fillstyle transparent solid 0.5 noborder, \\
'' using 1:5 with lines title 'pending paths' linecolor rgb '#0090ff' linewidth 3, \\
'' using 1:6 with lines title 'pending favs' linecolor rgb '#c00080' linewidth 3, \\
'' using 1:2 with lines title 'cycles done' linecolor rgb '#c000f0' linewidth 3
set terminal png truecolor enhanced size 1000,200 butt
set output '$2/low_freq.png'
set terminal png truecolor enhanced size 1000,200 butt # 重新设置终端尺寸(用于低频图像)
set output '$2/low_freq.png' # 设置输出文件为低频图像文件
# 绘制低频图像
plot '$1/plot_data' using 1:8 with filledcurve x1 title '' linecolor rgb '#c00080' fillstyle transparent solid 0.2 noborder, \\
'' using 1:8 with lines title ' uniq crashes' linecolor rgb '#c00080' linewidth 3, \\
'' using 1:9 with lines title 'uniq hangs' linecolor rgb '#c000f0' linewidth 3, \\
'' using 1:10 with lines title 'levels' linecolor rgb '#0090ff' linewidth 3
set terminal png truecolor enhanced size 1000,200 butt
set output '$2/exec_speed.png'
set terminal png truecolor enhanced size 1000,200 butt # 重新设置终端尺寸(用于执行速度图像)
set output '$2/exec_speed.png' # 设置输出文件为执行速度图像文件
# 绘制执行速度图像
plot '$1/plot_data' using 1:11 with filledcurve x1 title '' linecolor rgb '#0090ff' fillstyle transparent solid 0.2 noborder, \\
'$1/plot_data' using 1:11 with lines title ' execs/sec' linecolor rgb '#0090ff' linewidth 3 smooth bezier;
'$1/plot_data' using 1:11 with lines title ' execs/sec' linecolor rgb '#0090ff' linewidth 3 smooth bezier; # 使用平滑贝塞尔曲线
_EOF_
) | gnuplot
) | gnuplot # 将生成的 gnuplot 脚本管道到 gnuplot
# 检查执行速度图像是否生成成功
if [ ! -s "$2/exec_speed.png" ]; then
echo "[-] Error: something went wrong! Perhaps you have an ancient version of gnuplot?" 1>&2
exit 1
echo "[-] 错误: 出现问题!您可能使用了古老版本的 gnuplot。" 1>&2 # 输出错误信息
exit 1 # 退出程序,返回错误状态
fi
echo "[*] Generating index.html..."
echo "[*] 生成 index.html..." # 输出生成 index.html 的提示
# 创建 index.html 文件
cat >"$2/index.html" <<_EOF_
<table style="font-family: 'Trebuchet MS', 'Tahoma', 'Arial', 'Helvetica'">
<tr><td style="width: 18ex"><b>Banner:</b></td><td>$BANNER</td></tr>
<tr><td><b>Directory:</b></td><td>$1</td></tr>
<tr><td><b>Generated on:</b></td><td>`date`</td></tr>
<tr><td style="width: 18ex"><b>Banner:</b></td><td>$BANNER</td></tr> # 显示 banner 信息
<tr><td><b>Directory:</b></td><td>$1</td></tr> # 显示输入目录
<tr><td><b>Generated on:</b></td><td>`date`</td></tr> # 显示生成日期
</table>
<p>
<img src="high_freq.png" width=1000 height=300><p>
<img src="low_freq.png" width=1000 height=200><p>
<img src="exec_speed.png" width=1000 height=200>
<img src="high_freq.png" width=1000 height=300><p> # 显示高频图像
<img src="low_freq.png" width=1000 height=200><p> # 显示低频图像
<img src="exec_speed.png" width=1000 height=200> # 显示执行速度图像
_EOF_
# Make it easy to remotely view results when outputting directly to a directory
# served by Apache or other HTTP daemon. Since the plots aren't horribly
# sensitive, this seems like a reasonable trade-off.
# 当直接输出到由 Apache 或其他 HTTP 守护进程提供服务的目录时,
# 方便远程查看结果。由于这些图表并不怎么敏感,这样的权衡看起来是合理的。
chmod 755 "$2"
chmod 644 "$2/high_freq.png" "$2/low_freq.png" "$2/exec_speed.png" "$2/index.html"
chmod 755 "$2" # 设置输出目录权限
chmod 644 "$2/high_freq.png" "$2/low_freq.png" "$2/exec_speed.png" "$2/index.html" # 设置文件权限
echo "[+] All done - enjoy your charts!"
echo "[+] 完成 - 享受您的图表!" # 输出完成提示
exit 0
exit 0 # 正常退出程序

File diff suppressed because it is too large

@@ -1,24 +1,24 @@
/*
Copyright 2013 Google LLC All rights reserved.
版权所有 2013 Google LLC 保留所有权利。
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
根据 Apache 许可证 2.0 版("许可证")授权;
除非符合许可证的规定,否则您不得使用此文件。
您可以从以下网址获取许可证的副本:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
除非适用法律要求或以书面形式同意,根据许可证分发的软件
是基于"按现状"提供的,不附带任何明示或暗示的保证或条件。
请参阅许可证,了解许可证下有关权限和限制的具体规定。
*/
/*
american fuzzy lop - debug / error handling macros
American Fuzzy Lop - 调试/错误处理宏
--------------------------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
作者和维护者:Michal Zalewski <lcamtuf@google.com>
*/
#ifndef _HAVE_DEBUG_H
@@ -30,49 +30,49 @@
#include "config.h"
/*******************
* Terminal colors *
* 终端颜色 *
*******************/
#ifdef USE_COLOR
# define cBLK "\x1b[0;30m"
# define cRED "\x1b[0;31m"
# define cGRN "\x1b[0;32m"
# define cBRN "\x1b[0;33m"
# define cBLU "\x1b[0;34m"
# define cMGN "\x1b[0;35m"
# define cCYA "\x1b[0;36m"
# define cLGR "\x1b[0;37m"
# define cGRA "\x1b[1;90m"
# define cLRD "\x1b[1;91m"
# define cLGN "\x1b[1;92m"
# define cYEL "\x1b[1;93m"
# define cLBL "\x1b[1;94m"
# define cPIN "\x1b[1;95m"
# define cLCY "\x1b[1;96m"
# define cBRI "\x1b[1;97m"
# define cRST "\x1b[0m"
# define bgBLK "\x1b[40m"
# define bgRED "\x1b[41m"
# define bgGRN "\x1b[42m"
# define bgBRN "\x1b[43m"
# define bgBLU "\x1b[44m"
# define bgMGN "\x1b[45m"
# define bgCYA "\x1b[46m"
# define bgLGR "\x1b[47m"
# define bgGRA "\x1b[100m"
# define bgLRD "\x1b[101m"
# define bgLGN "\x1b[102m"
# define bgYEL "\x1b[103m"
# define bgLBL "\x1b[104m"
# define bgPIN "\x1b[105m"
# define bgLCY "\x1b[106m"
# define bgBRI "\x1b[107m"
# define cBLK "\x1b[0;30m" // 黑色文本
# define cRED "\x1b[0;31m" // 红色文本
# define cGRN "\x1b[0;32m" // 绿色文本
# define cBRN "\x1b[0;33m" // 棕色文本
# define cBLU "\x1b[0;34m" // 蓝色文本
# define cMGN "\x1b[0;35m" // 紫色文本
# define cCYA "\x1b[0;36m" // 青色文本
# define cLGR "\x1b[0;37m" // 浅灰色文本
# define cGRA "\x1b[1;90m" // 深灰色文本
# define cLRD "\x1b[1;91m" // 浅红色文本
# define cLGN "\x1b[1;92m" // 浅绿色文本
# define cYEL "\x1b[1;93m" // 浅黄色文本
# define cLBL "\x1b[1;94m" // 浅蓝色文本
# define cPIN "\x1b[1;95m" // 浅紫色文本
# define cLCY "\x1b[1;96m" // 浅青色文本
# define cBRI "\x1b[1;97m" // 白色文本
# define cRST "\x1b[0m" // 重置颜色
# define bgBLK "\x1b[40m" // 黑色背景
# define bgRED "\x1b[41m" // 红色背景
# define bgGRN "\x1b[42m" // 绿色背景
# define bgBRN "\x1b[43m" // 棕色背景
# define bgBLU "\x1b[44m" // 蓝色背景
# define bgMGN "\x1b[45m" // 紫色背景
# define bgCYA "\x1b[46m" // 青色背景
# define bgLGR "\x1b[47m" // 浅灰色背景
# define bgGRA "\x1b[100m" // 深灰色背景
# define bgLRD "\x1b[101m" // 浅红色背景
# define bgLGN "\x1b[102m" // 浅绿色背景
# define bgYEL "\x1b[103m" // 浅黄色背景
# define bgLBL "\x1b[104m" // 浅蓝色背景
# define bgPIN "\x1b[105m" // 浅紫色背景
# define bgLCY "\x1b[106m" // 浅青色背景
# define bgBRI "\x1b[107m" // 白色背景
#else
# define cBLK ""
# define cBLK "" // 不使用颜色
# define cRED ""
# define cGRN ""
# define cBRN ""
@@ -90,7 +90,7 @@
# define cBRI ""
# define cRST ""
# define bgBLK ""
# define bgBLK "" // 不使用背景颜色
# define bgRED ""
# define bgGRN ""
# define bgBRN ""
@@ -115,25 +115,25 @@
#ifdef FANCY_BOXES
# define SET_G1 "\x1b)0" /* Set G1 for box drawing */
# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */
# define bSTART "\x0e" /* Enter G1 drawing mode */
# define bSTOP "\x0f" /* Leave G1 drawing mode */
# define bH "q" /* Horizontal line */
# define bV "x" /* Vertical line */
# define bLT "l" /* Left top corner */
# define bRT "k" /* Right top corner */
# define bLB "m" /* Left bottom corner */
# define bRB "j" /* Right bottom corner */
# define bX "n" /* Cross */
# define bVR "t" /* Vertical, branch right */
# define bVL "u" /* Vertical, branch left */
# define bHT "v" /* Horizontal, branch top */
# define bHB "w" /* Horizontal, branch bottom */
# define SET_G1 "\x1b)0" /* 设置 G1 用于绘制框 */
# define RESET_G1 "\x1b)B" /* 重置 G1 为 ASCII 字符 */
# define bSTART "\x0e" /* 进入 G1 绘制模式 */
# define bSTOP "\x0f" /* 离开 G1 绘制模式 */
# define bH "q" /* 水平线 */
# define bV "x" /* 垂直线 */
# define bLT "l" /* 左上角 */
# define bRT "k" /* 右上角 */
# define bLB "m" /* 左下角 */
# define bRB "j" /* 右下角 */
# define bX "n" /* 交叉点 */
# define bVR "t" /* 垂直,右分支 */
# define bVL "u" /* 垂直,左分支 */
# define bHT "v" /* 水平,顶部分支 */
# define bHB "w" /* 水平,底部分支 */
#else
# define SET_G1 ""
# define SET_G1 "" // 不使用 G1 绘制框
# define RESET_G1 ""
# define bSTART ""
# define bSTOP ""
@@ -152,107 +152,105 @@
#endif /* ^FANCY_BOXES */
/***********************
* Misc terminal codes *
* 其他终端代码 *
***********************/
#define TERM_HOME "\x1b[H"
#define TERM_CLEAR TERM_HOME "\x1b[2J"
#define cEOL "\x1b[0K"
#define CURSOR_HIDE "\x1b[?25l"
#define CURSOR_SHOW "\x1b[?25h"
#define TERM_HOME "\x1b[H" // 移动光标到屏幕左上角
#define TERM_CLEAR TERM_HOME "\x1b[2J" // 清除屏幕
#define cEOL "\x1b[0K" // 清除光标到行尾
#define CURSOR_HIDE "\x1b[?25l" // 隐藏光标
#define CURSOR_SHOW "\x1b[?25h" // 显示光标
/************************
* Debug & error macros *
* 调试和错误宏 *
************************/
/* Just print stuff to the appropriate stream. */
/* 只是将内容打印到适当的输出流。 */
#ifdef MESSAGES_TO_STDOUT
# define SAYF(x...) printf(x)
# define SAYF(x...) printf(x) // 输出到标准输出
#else
# define SAYF(x...) fprintf(stderr, x)
# define SAYF(x...) fprintf(stderr, x) // 输出到标准错误
#endif /* ^MESSAGES_TO_STDOUT */
/* Show a prefixed warning. */
/* 显示带前缀的警告信息。 */
#define WARNF(x...) do { \
SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
SAYF(cYEL "[!] " cBRI "警告: " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Show a prefixed "doing something" message. */
/* 显示带前缀的"正在做某事"消息。 */
#define ACTF(x...) do { \
SAYF(cLBL "[*] " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Show a prefixed "success" message. */
/* 显示带前缀的"成功"消息。 */
#define OKF(x...) do { \
SAYF(cLGN "[+] " cRST x); \
SAYF(cRST "\n"); \
} while (0)
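/* Usage sketch for the message macros above (the arguments are illustrative,
   hypothetical values):

     ACTF("Scanning '%s'...", in_dir);
     WARNF("Only %u usable inputs found.", cnt);
     OKF("All set and ready to roll!");
*/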
/* Show a prefixed fatal error message (not used in afl). */
/* 显示带前缀的致命错误消息(未在 afl 中使用)。 */
#define BADF(x...) do { \
SAYF(cLRD "\n[-] " cRST x); \
SAYF(cRST "\n"); \
} while (0)
/* Die with a verbose non-OS fatal error message. */
/* 输出详细的(与操作系统无关的)致命错误消息并退出程序。 */
#define FATAL(x...) do { \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] 程序中止 : " \
cBRI x); \
SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \
SAYF(cLRD "\n 位置 : " cRST "%s(), %s:%u\n\n", \
__FUNCTION__, __FILE__, __LINE__); \
exit(1); \
} while (0)
/* Die by calling abort() to provide a core dump. */
/* 通过调用 abort() 以提供核心转储而退出。 */
#define ABORT(x...) do { \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] 程序中止 : " \
cBRI x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \
SAYF(cLRD "\n 停止位置 : " cRST "%s(), %s:%u\n\n", \
__FUNCTION__, __FILE__, __LINE__); \
abort(); \
} while (0)
/* Die while also including the output of perror(). */
/* 在包含 perror() 输出的同时终止程序。 */
#define PFATAL(x...) do { \
fflush(stdout); \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] 系统错误 : " \
cBRI x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \
SAYF(cLRD "\n 停止位置 : " cRST "%s(), %s:%u\n", \
__FUNCTION__, __FILE__, __LINE__); \
SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \
SAYF(cLRD " 操作系统消息 : " cRST "%s\n", strerror(errno)); \
exit(1); \
} while (0)
/* Die with FAULT() or PFAULT() depending on the value of res (used to
interpret different failure modes for read(), write(), etc). */
/* 根据 res 的值(用于解释 read()、write() 等的不同失败模式)调用 FATAL() 或 PFATAL()。 */
#define RPFATAL(res, x...) do { \
if (res < 0) PFATAL(x); else FATAL(x); \
} while (0)
/* Error-checking versions of read() and write() that call RPFATAL() as
appropriate. */
/* 带错误检查的 read() 和 write() 版本,会在适当时调用 RPFATAL()。 */
#define ck_write(fd, buf, len, fn) do { \
u32 _len = (len); \
s32 _res = write(fd, buf, _len); \
if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
if (_res != _len) RPFATAL(_res, "对 %s 的短写入", fn); \
} while (0)
#define ck_read(fd, buf, len, fn) do { \
u32 _len = (len); \
s32 _res = read(fd, buf, _len); \
if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
if (_res != _len) RPFATAL(_res, "对 %s 的短读取", fn); \
} while (0)
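/* Usage sketch (the fd, buf, len variables and file-name strings are
   hypothetical):

     ck_write(out_fd, buf, len, "output file");
     ck_read(in_fd, buf, len, "input file");

   On a short or failed transfer these abort via RPFATAL() with a readable
   message instead of silently continuing. */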
#endif /* ! _HAVE_DEBUG_H */
