[Original] AFL Source Code Speedrun Notes
2023-4-6 19:39:25 Author: bbs.pediy.com

The people I know have all started grinding away at fuzzing, so under that pressure I began looking into it as well. AFL is a classic fuzzer that is still usable today, so that is where I decided to start.

First, when we fuzz a project with AFL we normally compile it with afl-gcc instead of gcc. To state the conclusion up front: afl-gcc is just a wrapper whose job is to arrange for instrumentation to be inserted into the code; once the instrumentation-related flags are set up, it still invokes the native gcc to do the actual compilation.
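
To make the "thin wrapper" idea concrete, here is a minimal sketch of such a wrapper (not the real afl-gcc; the /usr/local/lib/afl path is a made-up stand-in for wherever afl-as is installed). It copies the user's arguments, appends -B so gcc looks for `as` in AFL's directory, and then execs the native compiler. The relevant excerpt from the real edit_params() follows below.

/* Minimal compiler-wrapper sketch (assumption: as_path is /usr/local/lib/afl). */
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char** argv) {

  char* cc_params[4096];
  int   n = 0;

  char* alt_cc = getenv("AFL_CC");            /* honor AFL_CC like afl-gcc does */
  cc_params[n++] = alt_cc ? alt_cc : "gcc";

  for (int i = 1; i < argc; i++) cc_params[n++] = argv[i];

  cc_params[n++] = "-B";                      /* tell gcc where to find "as"... */
  cc_params[n++] = "/usr/local/lib/afl";      /* ...which is really afl-as      */
  cc_params[n]   = NULL;

  execvp(cc_params[0], cc_params);            /* replaces this process          */
  return 1;                                   /* only reached if execvp failed  */
}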


static void edit_params(u32 argc, char** argv) {

    ......

    if (!strcmp(name, "afl-g++")) {

      u8* alt_cxx = getenv("AFL_CXX");

      cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++";

    } else if (!strcmp(name, "afl-gcj")) {

      u8* alt_cc = getenv("AFL_GCJ");

      cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj";

    } else {

      u8* alt_cc = getenv("AFL_CC");

      cc_params[0] = alt_cc ? alt_cc : (u8*)"gcc";

    }

  }

  while (--argc) {

    u8* cur = *(++argv);

    if (!strncmp(cur, "-B", 2)) {

      if (!be_quiet) WARNF("-B is already set, overriding");

      if (!cur[2] && argc > 1) { argc--; argv++; }

      continue;

    }

    if (!strcmp(cur, "-integrated-as")) continue;

    if (!strcmp(cur, "-pipe")) continue;

    if (!strcmp(cur, "-m32")) m32_set = 1;

    if (!strcmp(cur, "-fsanitize=address") ||

        !strcmp(cur, "-fsanitize=memory")) asan_set = 1;

    if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;

    cc_params[cc_par_cnt++] = cur;

  }

  cc_params[cc_par_cnt++] = "-B";

  cc_params[cc_par_cnt++] = as_path;

  if (clang_mode)

    cc_params[cc_par_cnt++] = "-no-integrated-as";

  if (getenv("AFL_HARDEN")) {

    cc_params[cc_par_cnt++] = "-fstack-protector-all";

    if (!fortify_set)

      cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";

  }

  if (asan_set) {

    /* Pass this on to afl-as to adjust map density. */

    setenv("AFL_USE_ASAN", "1", 1);

  } else if (getenv("AFL_USE_ASAN")) {

    if (getenv("AFL_USE_MSAN"))

      FATAL("ASAN and MSAN are mutually exclusive");

    if (getenv("AFL_HARDEN"))

      FATAL("ASAN and AFL_HARDEN are mutually exclusive");

    cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";

    cc_params[cc_par_cnt++] = "-fsanitize=address";

  } else if (getenv("AFL_USE_MSAN")) {

    if (getenv("AFL_USE_ASAN"))

      FATAL("ASAN and MSAN are mutually exclusive");

    if (getenv("AFL_HARDEN"))

      FATAL("MSAN and AFL_HARDEN are mutually exclusive");

    cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";

    cc_params[cc_par_cnt++] = "-fsanitize=memory";

  }

  if (!getenv("AFL_DONT_OPTIMIZE")) {

    /* On 64-bit FreeBSD systems, clang -g -m32 is broken, but -m32 itself

       works OK. This has nothing to do with us, but let's avoid triggering

       that bug. */

    if (!clang_mode || !m32_set)

      cc_params[cc_par_cnt++] = "-g";

    cc_params[cc_par_cnt++] = "-O3";

    cc_params[cc_par_cnt++] = "-funroll-loops";

    /* Two indicators that you're building for fuzzing; one of them is

       AFL-specific, the other is shared with libfuzzer. */

    cc_params[cc_par_cnt++] = "-D__AFL_COMPILER=1";

    cc_params[cc_par_cnt++] = "-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION=1";

  }

  if (getenv("AFL_NO_BUILTIN")) {

    cc_params[cc_par_cnt++] = "-fno-builtin-strcmp";

    cc_params[cc_par_cnt++] = "-fno-builtin-strncmp";

    cc_params[cc_par_cnt++] = "-fno-builtin-strcasecmp";

    cc_params[cc_par_cnt++] = "-fno-builtin-strncasecmp";

    cc_params[cc_par_cnt++] = "-fno-builtin-memcmp";

    cc_params[cc_par_cnt++] = "-fno-builtin-strstr";

    cc_params[cc_par_cnt++] = "-fno-builtin-strcasestr";

  }

  cc_params[cc_par_cnt] = NULL;

}

In afl-as, edit_params is again used to rewrite the arguments, and add_instrumentation instruments the generated assembly. Once instrumentation is done, afl-as forks a child process and invokes the native as to assemble the result.
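
The hand-off to the system assembler is roughly the sketch below (simplified from the description above; the file names in as_params are made-up placeholders for the argument vector that edit_params builds):

/* Rough sketch of afl-as's tail end: rewrite the .s file, then fork and run the real `as`. */
#include <sys/wait.h>
#include <unistd.h>

static char* as_params[] = { "as", "--64", "-o", "demo.o", "/tmp/demo.s", NULL };

static void assemble_instrumented(void) {
  int   status;
  pid_t pid = fork();

  if (!pid) {
    execvp(as_params[0], as_params);   /* child: becomes the native assembler */
    _exit(1);                          /* only reached if execvp failed       */
  }
  if (pid < 0 || waitpid(pid, &status, 0) <= 0) _exit(1);
}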


static void add_instrumentation(void) {

    ......

    /* If we're in the right mood for instrumenting, check for function

       names or conditional labels. This is a bit messy, but in essence,

       we want to catch:

         ^main:      - function entry point (always instrumented)

         ^.L0:       - GCC branch label

         ^.LBB0_0:   - clang branch label (but only in clang mode)

         ^\tjnz foo  - conditional branches

       ...but not:

         ^# BB#0:    - clang comments

         ^ # BB#0:   - ditto

         ^.Ltmp0:    - clang non-branch labels

         ^.LC0       - GCC non-branch labels

         ^.LBB0_0:   - ditto (when in GCC mode)

         ^\tjmp foo  - non-conditional jumps

       Additionally, clang and GCC on MacOS X follow a different convention

       with no leading dots on labels, hence the weird maze of #ifdefs

       later on.

     */

    if (skip_intel || skip_app || skip_csect || !instr_ok ||

        line[0] == '#' || line[0] == ' ') continue;

    /* Conditional branch instruction (jnz, etc). We append the instrumentation

       right after the branch (to instrument the not-taken path) and at the

       branch destination label (handled later on). */

    if (line[0] == '\t') {

      if (line[1] == 'j' && line[2] != 'm' && R(100) < inst_ratio) {

        fprintf(outf, use_64bit ? trampoline_fmt_64 : trampoline_fmt_32,

                R(MAP_SIZE));

        ins_lines++;

      }

      continue;

    }

    /* Label of some sort. This may be a branch destination, but we need to

       tread carefully and account for several different formatting

       conventions. */

#ifdef __APPLE__

    /* Apple: L<whatever><digit>: */

    if ((colon_pos = strstr(line, ":"))) {

      if (line[0] == 'L' && isdigit(*(colon_pos - 1))) {

#else

    /* Everybody else: .L<whatever>: */

    if (strstr(line, ":")) {

      if (line[0] == '.') {

#endif /* __APPLE__ */

        /* .L0: or LBB0_0: style jump destination */

#ifdef __APPLE__

        /* Apple: L<num> / LBB<num> */

        if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3)))

            && R(100) < inst_ratio) {

#else

        /* Apple: .L<num> / .LBB<num> */

        if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3)))

            && R(100) < inst_ratio) {

#endif /* __APPLE__ */

          /* An optimization is possible here by adding the code only if the

             label is mentioned in the code in contexts other than call / jmp.

             That said, this complicates the code by requiring two-pass

             processing (messy with stdin), and results in a speed gain

             typically under 10%, because compilers are generally pretty good

             about not generating spurious intra-function jumps.

             We use deferred output chiefly to avoid disrupting

             .Lfunc_begin0-style exception handling calculations (a problem on

             MacOS X). */

          if (!skip_next_label) instrument_next = 1; else skip_next_label = 0;

        }

      } else {

        /* Function label (always instrumented, deferred mode). */

        instrument_next = 1;

      }

    }

  }

  if (ins_lines)

    fputs(use_64bit ? main_payload_64 : main_payload_32, outf);

  if (input_file) fclose(inf);

  fclose(outf);

  if (!be_quiet) {

    if (!ins_lines) WARNF("No instrumentation targets found%s.",

                          pass_thru ? " (pass-thru mode)" : "");

    else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).",

             ins_lines, use_64bit ? "64" : "32",

             getenv("AFL_HARDEN") ? "hardened" :

             (sanitizer ? "ASAN/MSAN" : "non-hardened"),

             inst_ratio);

  }

}

Typically we start the fuzzer with afl-fuzz -i input -o output -- program; correspondingly, the first half of main() in afl-fuzz.c is devoted to parsing these arguments:
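
The parsing itself is ordinary getopt(); a stripped-down sketch of just the -i/-o handling (the real option string covers many more switches) looks like this:

/* Tiny getopt() sketch; everything after "--" stays in argv[optind..] and
   becomes the target command line. */
#include <unistd.h>

static char *in_dir, *out_dir;

static void parse_opts(int argc, char** argv) {
  int opt;
  while ((opt = getopt(argc, argv, "+i:o:")) > 0) {
    switch (opt) {
      case 'i': in_dir  = optarg; break;   /* seed corpus directory */
      case 'o': out_dir = optarg; break;   /* findings directory    */
    }
  }
}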


int main(int argc, char** argv) {

  ......

  setup_signal_handlers();

  check_asan_opts();

  if (sync_id) fix_up_sync();

  if (!strcmp(in_dir, out_dir))

    FATAL("Input and output directories can't be the same");

  if (dumb_mode) {

    if (crash_mode) FATAL("-C and -n are mutually exclusive");

    if (qemu_mode)  FATAL("-Q and -n are mutually exclusive");

  }

  if (getenv("AFL_NO_FORKSRV"))    no_forkserver    = 1;

  if (getenv("AFL_NO_CPU_RED"))    no_cpu_meter_red = 1;

  if (getenv("AFL_NO_ARITH"))      no_arith         = 1;

  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue    = 1;

  if (getenv("AFL_FAST_CAL"))      fast_cal         = 1;

  if (getenv("AFL_HANG_TMOUT")) {

    hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));

    if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");

  }

  if (dumb_mode == 2 && no_forkserver)

    FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");

  if (getenv("AFL_PRELOAD")) {

    setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);

    setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);

  }

  if (getenv("AFL_LD_PRELOAD"))

    FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");

  save_cmdline(argc, argv);

  fix_up_banner(argv[optind]);

  check_if_tty();

  get_core_count();

  bind_to_free_cpu();

  check_crash_handling();

  check_cpu_governor();

  setup_post();

  setup_shm();

  init_count_class16();

  setup_dirs_fds();

  read_testcases();

  load_auto();

  pivot_inputs();

  if (extras_dir) load_extras(extras_dir);

  if (!timeout_given) find_timeout();

  detect_file_args(argv + optind + 1);

  if (!out_file) setup_stdio_file();

  check_binary(argv[optind]);

  start_time = get_cur_time();

  if (qemu_mode)

    use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);

  else

    use_argv = argv + optind;

  perform_dry_run(use_argv);

  cull_queue();

  show_init_stats();

  seek_to = find_start_position();

  write_stats_file(0, 0, 0);

  save_auto();

  if (stop_soon) goto stop_fuzzing;

  /* Woop woop woop */

  if (!not_on_tty) {

    sleep(4);

    start_time += 4000;

    if (stop_soon) goto stop_fuzzing;

  }

  while (1) {

    u8 skipped_fuzz;

    cull_queue();

    if (!queue_cur) {

      queue_cycle++;

      current_entry     = 0;

      cur_skipped_paths = 0;

      queue_cur         = queue;

      while (seek_to) {

        current_entry++;

        seek_to--;

        queue_cur = queue_cur->next;

      }

      show_stats();

      if (not_on_tty) {

        ACTF("Entering queue cycle %llu.", queue_cycle);

        fflush(stdout);

      }

      /* If we had a full queue cycle with no new finds, try

         recombination strategies next. */

      if (queued_paths == prev_queued) {

        if (use_splicing) cycles_wo_finds++; else use_splicing = 1;

      } else cycles_wo_finds = 0;

      prev_queued = queued_paths;

      if (sync_id && queue_cycle == 1 && getenv("AFL_IMPORT_FIRST"))

        sync_fuzzers(use_argv);

    }

    skipped_fuzz = fuzz_one(use_argv);

    if (!stop_soon && sync_id && !skipped_fuzz) {

      if (!(sync_interval_cnt++ % SYNC_INTERVAL))

        sync_fuzzers(use_argv);

    }

    if (!stop_soon && exit_1) stop_soon = 2;

    if (stop_soon) break;

    queue_cur = queue_cur->next;

    current_entry++;

  }

  if (queue_cur) show_stats();

  /* If we stopped programmatically, we kill the forkserver and the current runner.

     If we stopped manually, this is done by the signal handler. */

  if (stop_soon == 2) {

      if (child_pid > 0) kill(child_pid, SIGKILL);

      if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);

  }

  /* Now that we've killed the forkserver, we wait for it to be able to get rusage stats. */

  if (waitpid(forksrv_pid, NULL, 0) <= 0) {

    WARNF("error waitpid\n");

  }

  write_bitmap();

  write_stats_file(0, 0, 0);

  save_auto();

stop_fuzzing:

  SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,

       stop_soon == 2 ? "programmatically" : "by user");

  /* Running for more than 30 minutes but still doing first cycle? */

  if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {

    SAYF("\n" cYEL "[!] " cRST

           "Stopped during the first cycle, results may be incomplete.\n"

           "    (For info on resuming, see %s/README.)\n", doc_path);

  }

  fclose(plot_file);

  destroy_queue();

  destroy_extras();

  ck_free(target_path);

  ck_free(sync_id);

  alloc_report();

  OKF("We're done here. Have a nice day!\n");

  exit(0);

}

I hit this problem myself when running AFL on Ubuntu 20 (it is reported by check_crash_handling()): by default the system pipes crash information to an external program, which hurts fuzzing efficiency, so we change how crash information is saved with echo core >/proc/sys/kernel/core_pattern, making the kernel write it to a local file instead.

If the user sets the AFL_POST_LIBRARY environment variable, the shared library at that path is loaded, the afl_postprocess function is resolved from it, and the result is stored in post_handler.
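
As an illustration, such a post-processor could look like the sketch below. The signature follows the example shipped in AFL's experimental post_library; the fixed "HDR!" magic is just an invented placeholder. Build it as a shared object and point AFL_POST_LIBRARY at the resulting .so.

/* Sketch of an AFL_POST_LIBRARY post-processor: patch each mutated input
   (here: force a 4-byte magic header) before AFL writes it out. */
#include <string.h>

static unsigned char buf[65536];

const unsigned char* afl_postprocess(const unsigned char* in_buf,
                                     unsigned int* len) {

  if (*len < 4) return in_buf;                 /* nothing worth fixing   */
  if (*len > sizeof(buf)) *len = sizeof(buf);  /* clamp to our buffer    */

  memcpy(buf, in_buf, *len);
  memcpy(buf, "HDR!", 4);                      /* hypothetical magic     */

  return buf;                                  /* AFL uses this instead  */
}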


EXP_ST void setup_shm(void) {

  u8* shm_str;

  if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);

  memset(virgin_tmout, 255, MAP_SIZE);

  memset(virgin_crash, 255, MAP_SIZE);

  shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);

  if (shm_id < 0) PFATAL("shmget() failed");

  atexit(remove_shm);

  shm_str = alloc_printf("%d", shm_id);

  /* If somebody is asking us to fuzz instrumented binaries in dumb mode,

     we don't want them to detect instrumentation, since we won't be sending

     fork server commands. This should be replaced with better auto-detection

     later on, perhaps? */

  if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1);

  ck_free(shm_str);

  trace_bits = shmat(shm_id, NULL, 0);

  if (trace_bits == (void *)-1) PFATAL("shmat() failed");

}


static void read_testcases(void) {

  struct dirent **nl;

  s32 nl_cnt;

  u32 i;

  u8* fn;

  /* Auto-detect non-in-place resumption attempts. */

  fn = alloc_printf("%s/queue", in_dir);

  if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn);

  ACTF("Scanning '%s'...", in_dir);

  /* We use scandir() + alphasort() rather than readdir() because otherwise,

     the ordering  of test cases would vary somewhat randomly and would be

     difficult to control. */

  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);

  if (nl_cnt < 0) {

    if (errno == ENOENT || errno == ENOTDIR)

      SAYF("\n" cLRD "[-] " cRST

           "The input directory does not seem to be valid - try again. The fuzzer needs\n"

           "    one or more test case to start with - ideally, a small file under 1 kB\n"

           "    or so. The cases must be stored as regular files directly in the input\n"

           "    directory.\n");

    PFATAL("Unable to open '%s'", in_dir);

  }

  if (shuffle_queue && nl_cnt > 1) {

    ACTF("Shuffling queue...");

    shuffle_ptrs((void**)nl, nl_cnt);

  }

  for (i = 0; i < nl_cnt; i++) {

    struct stat st;

    u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name);

    u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);

    u8  passed_det = 0;

    free(nl[i]); /* not tracked */

    if (lstat(fn, &st) || access(fn, R_OK))

      PFATAL("Unable to access '%s'", fn);

    /* This also takes care of . and .. */

    if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.testcases")) {

      ck_free(fn);

      ck_free(dfn);

      continue;

    }

    if (st.st_size > MAX_FILE)

      FATAL("Test case '%s' is too big (%s, limit is %s)", fn,

            DMS(st.st_size), DMS(MAX_FILE));

    /* Check for metadata that indicates that deterministic fuzzing

       is complete for this entry. We don't want to repeat deterministic

       fuzzing when resuming aborted scans, because it would be pointless

       and probably very time-consuming. */

    if (!access(dfn, F_OK)) passed_det = 1;

    ck_free(dfn);

    add_to_queue(fn, st.st_size, passed_det);

  }

If resuming_fuzz is set, i.e. we are resuming a fuzzing session from the output directory, a timeout value is computed from the previous session's fuzzer_stats file and stored in exec_tmout.

AFL then checks whether the target command line contains the @@ placeholder. If it does, @@ is replaced with the path built from "%s/.cur_input" and out_dir, so that the command used to launch the target is complete; the same path is stored in out_file, and mutated inputs are later written to that file before each run of the target.

If the target reads from stdin rather than from a file, the "%s/.cur_input" file is opened and kept in the out_fd descriptor, and stdin is later redirected to it. Together with detect_file_args, this is how the mutated data always ends up in "%s/.cur_input" and gets fed to the target for each fuzzing run.
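
A simplified sketch of the @@ substitution (the real logic lives in detect_file_args(); out_dir stands for the -o directory, and the helper name here is invented):

/* Replace "@@" arguments with out_dir/.cur_input and remember that path in
   out_file, so mutated data can be written there before each run. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char* out_file;

static void detect_file_args_sketch(char** argv, const char* out_dir) {
  char path[4096];

  snprintf(path, sizeof(path), "%s/.cur_input", out_dir);
  out_file = strdup(path);

  for (int i = 0; argv[i]; i++)
    if (!strcmp(argv[i], "@@")) argv[i] = out_file;   /* simplified: whole-arg match */
}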


static void perform_dry_run(char** argv) {

  struct queue_entry* q = queue;

  u32 cal_failures = 0;

  u8* skip_crashes = getenv("AFL_SKIP_CRASHES");

  while (q) {

    u8* use_mem;

    u8  res;

    s32 fd;

    u8* fn = strrchr(q->fname, '/') + 1;

    ACTF("Attempting dry run with '%s'...", fn);

    fd = open(q->fname, O_RDONLY);

    if (fd < 0) PFATAL("Unable to open '%s'", q->fname);

    use_mem = ck_alloc_nozero(q->len);

    if (read(fd, use_mem, q->len) != q->len)

      FATAL("Short read from '%s'", q->fname);

    close(fd);

    res = calibrate_case(argv, q, use_mem, 0, 1);

    ck_free(use_mem);

    if (stop_soon) return;

    if (res == crash_mode || res == FAULT_NOBITS)

      SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST,

           q->len, q->bitmap_size, q->exec_us);

    ......

    if (q->var_behavior) WARNF("Instrumentation output varies across runs.");

    q = q->next;

  }

  if (cal_failures) {

    if (cal_failures == queued_paths)

      FATAL("All test cases time out%s, giving up!",

            skip_crashes ? " or crash" : "");

    WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,

          ((double)cal_failures) * 100 / queued_paths,

          skip_crashes ? " or crashes" : "");

    if (cal_failures * 5 > queued_paths)

      WARNF(cLRD "High percentage of rejected test cases, check settings!");

  }

  OKF("All test cases processed.");

}


static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,

                         u32 handicap, u8 from_queue) {

  static u8 first_trace[MAP_SIZE];

  u8  fault = 0, new_bits = 0, var_detected = 0, hnb = 0,

      first_run = (q->exec_cksum == 0);

  u64 start_us, stop_us;

  s32 old_sc = stage_cur, old_sm = stage_max;

  u32 use_tmout = exec_tmout;

  u8* old_sn = stage_name;

  /* Be a bit more generous about timeouts when resuming sessions, or when

     trying to calibrate already-added finds. This helps avoid trouble due

     to intermittent latency. */

  if (!from_queue || resuming_fuzz)

    use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,

                    exec_tmout * CAL_TMOUT_PERC / 100);

  q->cal_failed++;

  stage_name = "calibration";

  stage_max  = fast_cal ? 3 : CAL_CYCLES;

  /* Make sure the forkserver is up before we do anything, and let's not

     count its spin-up time toward binary calibration. */

  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)

    init_forkserver(argv);

  if (q->exec_cksum) {

    memcpy(first_trace, trace_bits, MAP_SIZE);

    hnb = has_new_bits(virgin_bits);

    if (hnb > new_bits) new_bits = hnb;

  }

  start_us = get_cur_time_us();

  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

    u32 cksum;

    if (!first_run && !(stage_cur % stats_update_freq)) show_stats();

    write_to_testcase(use_mem, q->len);

    fault = run_target(argv, use_tmout);

    /* stop_soon is set by the handler for Ctrl+C. When it's pressed,

       we want to bail out quickly. */

    if (stop_soon || fault != crash_mode) goto abort_calibration;

    if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {

      fault = FAULT_NOINST;

      goto abort_calibration;

    }

    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

    if (q->exec_cksum != cksum) {

      hnb = has_new_bits(virgin_bits);

      if (hnb > new_bits) new_bits = hnb;

      if (q->exec_cksum) {

        u32 i;

        for (i = 0; i < MAP_SIZE; i++) {

          if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {

            var_bytes[i] = 1;

            stage_max    = CAL_CYCLES_LONG;

          }

        }

        var_detected = 1;

      } else {

        q->exec_cksum = cksum;

        memcpy(first_trace, trace_bits, MAP_SIZE);

      }

    }

  }

  stop_us = get_cur_time_us();

  total_cal_us     += stop_us - start_us;

  total_cal_cycles += stage_max;

  /* OK, let's collect some stats about the performance of this test case.

     This is used for fuzzing air time calculations in calculate_score(). */

  q->exec_us     = (stop_us - start_us) / stage_max;

  q->bitmap_size = count_bytes(trace_bits);

  q->handicap    = handicap;

  q->cal_failed  = 0;

  total_bitmap_size += q->bitmap_size;

  total_bitmap_entries++;

  update_bitmap_score(q);

  /* If this case didn't result in new output from the instrumentation, tell

     parent. This is a non-critical problem, but something to warn the user

     about. */

  if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;

abort_calibration:

  if (new_bits == 2 && !q->has_new_cov) {

    q->has_new_cov = 1;

    queued_with_cov++;

  }

  /* Mark variable paths. */

  if (var_detected) {

    var_byte_count = count_bytes(var_bytes);

    if (!q->var_behavior) {

      mark_as_variable(q);

      queued_variable++;

    }

  }

  stage_name = old_sn;

  stage_cur  = old_sc;

  stage_max  = old_sm;

  if (!first_run) show_stats();

  return fault;

}

Normally, to run a test input we would fork+execve the target binary. But executing a program means loading the code and shared libraries, resolving symbols, and other time-consuming work, which would make AFL inefficient.

That process is wasteful. Notice that when we test the same binary over and over with different inputs, the naive approach performs fork+execve every single time; the waste lies exactly there, because everything has already been loaded once, yet we keep loading and tearing it down again.

The fork server was designed mainly to eliminate this waste. Through the injected instrumentation, a fork server is set up inside the target binary itself (yes, it is actually created by the target program). After finishing all initialization, the fork server stops at a chosen point (usually at main) and waits for the fuzzer to tell it to start.

Once the fuzzer gives the signal, the fork server calls fork and lets the child continue from there. Thanks to copy-on-write, fork costs very little and the child inherits the entire already-initialized context, so the loading overhead of repeated execve calls is avoided.
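
Written out in C, the loop that the injected fork server runs looks roughly like the sketch below (the real version, generated by afl-as, is the assembly shown next; descriptor 198 is the control pipe from afl-fuzz and 199 is the status pipe back to it):

/* C-level sketch of the fork-server loop injected into the target. */
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void forkserver_loop(void) {
  int   tmp = 0, status = 0;
  pid_t child;

  write(199, &tmp, 4);                        /* "hello": tell afl-fuzz we're up   */

  while (read(198, &tmp, 4) == 4) {           /* block until afl-fuzz says "go"    */
    child = fork();
    if (child < 0) _exit(1);
    if (!child) return;                       /* child: resume the target's main() */

    write(199, &child, 4);                    /* report the child's PID            */
    if (waitpid(child, &status, 0) <= 0) _exit(1);
    write(199, &status, 4);                   /* report how that run ended         */
  }
  _exit(0);
}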


__afl_forkserver:

  /* Phone home and tell the parent that we're OK. */

  pushl $4          /* length    */

  pushl $__afl_temp /* data      */

  pushl $199        /* file desc */

  call  write

  addl  $12, %esp

__afl_fork_wait_loop:

  /* Wait for parent by reading from the pipe. This will block until

     the parent sends us something. Abort if read fails. */

  pushl $4          /* length    */

  pushl $__afl_temp /* data      */

  pushl $198        /* file desc */

  call  read

  addl  $12, %esp

  cmpl  $4, %eax

  jne   __afl_die

  /* Once woken up, create a clone of our process. */

  call fork

  cmpl $0, %eax

  jl   __afl_die

  je   __afl_fork_resume

  /* In parent process: write PID to pipe, then wait for child.

     Parent will handle timeouts and SIGKILL the child as needed. */

  movl  %eax, __afl_fork_pid

  pushl $4              /* length    */

  pushl $__afl_fork_pid /* data      */

  pushl $199            /* file desc */

  call  write

  addl  $12, %esp

  pushl $2             /* WUNTRACED */

  pushl $__afl_temp    /* status    */

  pushl __afl_fork_pid /* PID       */

  call  waitpid

  addl  $12, %esp

  cmpl  $0, %eax

  jle   __afl_die

  /* Relay wait status to pipe, then loop back. */

  pushl $4          /* length    */

  pushl $__afl_temp /* data      */

  pushl $199        /* file desc */

  call  write

  addl  $12, %esp

  jmp __afl_fork_wait_loop

__afl_fork_resume:

  /* In child process: close fds, resume execution. */

  pushl $198

  call  close

  pushl $199

  call  close

  addl  $8, %esp

  ret

The fork server communicates with its counterpart logic in afl-fuzz through pipes, but not much actually travels over them: mostly just notifications about the run's state (the child PID and the exit status). The real feedback, including newly discovered paths, is carried through shared memory, so that data never has to go through the comparatively slow pipe channel.
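
On the fuzzer side, the coverage check over that shared map is conceptually simple. A simplified sketch (the real has_new_bits() also distinguishes brand-new edges from new hit counts and walks the map word by word):

/* trace_bits is the shared map filled in by the target; virgin_bits starts
   out as all 0xFF and records which map bytes have ever been exercised. */
#define MAP_SIZE 65536

static unsigned char virgin_bits[MAP_SIZE];   /* memset to 0xFF at startup */

static int saw_new_coverage(const unsigned char* trace_bits) {
  int ret = 0;

  for (int i = 0; i < MAP_SIZE; i++) {
    if (trace_bits[i] & virgin_bits[i]) {     /* something not seen before */
      virgin_bits[i] &= (unsigned char)~trace_bits[i];
      ret = 1;
    }
  }
  return ret;
}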


EXP_ST void init_forkserver(char** argv) {

  static struct itimerval it;

  int st_pipe[2], ctl_pipe[2];

  int status;

  s32 rlen;

  ACTF("Spinning up the fork server...");

  if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");

  forksrv_pid = fork();

  if (forksrv_pid < 0) PFATAL("fork() failed");

  if (!forksrv_pid) {

    struct rlimit r;

    /* Umpf. On OpenBSD, the default fd limit for root users is set to

       soft 128. Let's try to fix that... */

    if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {

      r.rlim_cur = FORKSRV_FD + 2;

      setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */

    }

    if (mem_limit) {

      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;

      setrlimit(RLIMIT_AS, &r); /* Ignore errors */

      /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but

         according to reliable sources, RLIMIT_DATA covers anonymous

         maps - so we should be getting good protection against OOM bugs. */

      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */

    }

    /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered

       before the dump is complete. */

    r.rlim_max = r.rlim_cur = 0;

    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */

    /* Isolate the process and configure standard descriptors. If out_file is

       specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */

    setsid();

    dup2(dev_null_fd, 1);

    dup2(dev_null_fd, 2);

    if (out_file) {

      dup2(dev_null_fd, 0);

    } else {

      dup2(out_fd, 0);

      close(out_fd);

    }

    /* Set up control and status pipes, close the unneeded original fds. */

    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");

    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");

    close(ctl_pipe[0]);

    close(ctl_pipe[1]);

    close(st_pipe[0]);

    close(st_pipe[1]);

    close(out_dir_fd);

    close(dev_null_fd);

    close(dev_urandom_fd);

    close(fileno(plot_file));

    /* This should improve performance a bit, since it stops the linker from

       doing extra work post-fork(). */

    if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);

    /* Set sane defaults for ASAN if nothing else specified. */

    setenv("ASAN_OPTIONS", "abort_on_error=1:"

                           "detect_leaks=0:"

                           "symbolize=0:"

                           "allocator_may_return_null=1", 0);

    /* MSAN is tricky, because it doesn't support abort_on_error=1 at this

       point. So, we do this in a very hacky way. */

    setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"

                           "symbolize=0:"

                           "abort_on_error=1:"

                           "allocator_may_return_null=1:"

                           "msan_track_origins=0", 0);

    execv(target_path, argv);

    /* Use a distinctive bitmap signature to tell the parent about execv()

       falling through. */

    *(u32*)trace_bits = EXEC_FAIL_SIG;

    exit(0);

  }

  /* Close the unneeded endpoints. */

  close(ctl_pipe[0]);

  close(st_pipe[1]);

  fsrv_ctl_fd = ctl_pipe[1];

  fsrv_st_fd  = st_pipe[0];

  /* Wait for the fork server to come up, but don't wait too long. */

  it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);

  it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;

  setitimer(ITIMER_REAL, &it, NULL);

  rlen = read(fsrv_st_fd, &status, 4);

  it.it_value.tv_sec = 0;

  it.it_value.tv_usec = 0;

  setitimer(ITIMER_REAL, &it, NULL);

  /* If we have a four-byte "hello" message from the server, we're all set.

     Otherwise, try to figure out what went wrong. */

  if (rlen == 4) {

    OKF("All right - fork server is up.");

    return;

  }

  if (child_timed_out)

    FATAL("Timeout while initializing fork server (adjusting -t may help)");

  if (waitpid(forksrv_pid, &status, 0) <= 0)

    PFATAL("waitpid() failed");

  if (WIFSIGNALED(status)) {

    if (mem_limit && mem_limit < 500 && uses_asan) {

      SAYF("\n" cLRD "[-] " cRST

           "Whoops, the target binary crashed suddenly, before receiving any input\n"

           "    from the fuzzer! Since it seems to be built with ASAN and you have a\n"

           "    restrictive memory limit configured, this is expected; please read\n"

           "    %s/notes_for_asan.txt for help.\n", doc_path);

    } else if (!mem_limit) {

      SAYF("\n" cLRD "[-] " cRST

           "Whoops, the target binary crashed suddenly, before receiving any input\n"

           "    from the fuzzer! There are several probable explanations:\n\n"

           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"

           "      need to fix the underlying problem or find a better replacement.\n\n"

           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"

           "      break afl-fuzz performance optimizations when running platform-specific\n"

           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"

           "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

    } else {

      SAYF("\n" cLRD "[-] " cRST

           "Whoops, the target binary crashed suddenly, before receiving any input\n"

           "    from the fuzzer! There are several probable explanations:\n\n"

           "    - The current memory limit (%s) is too restrictive, causing the\n"

           "      target to hit an OOM condition in the dynamic linker. Try bumping up\n"

           "      the limit with the -m setting in the command line. A simple way confirm\n"

           "      this diagnosis would be:\n\n"

#ifdef RLIMIT_AS

           "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"

#else

           "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"

#endif /* ^RLIMIT_AS */

           "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"

           "      estimate the required amount of virtual memory for the binary.\n\n"

           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"

           "      need to fix the underlying problem or find a better replacement.\n\n"

           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"

           "      break afl-fuzz performance optimizations when running platform-specific\n"

           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"

           "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",

           DMS(mem_limit << 20), mem_limit - 1);

    }

    FATAL("Fork server crashed with signal %d", WTERMSIG(status));

  }

  if (*(u32*)trace_bits == EXEC_FAIL_SIG)

    FATAL("Unable to execute target application ('%s')", argv[0]);

  if (mem_limit && mem_limit < 500 && uses_asan) {

    SAYF("\n" cLRD "[-] " cRST

           "Hmm, looks like the target binary terminated before we could complete a\n"

           "    handshake with the injected code. Since it seems to be built with ASAN and\n"

           "    you have a restrictive memory limit configured, this is expected; please\n"

           "    read %s/notes_for_asan.txt for help.\n", doc_path);

  } else if (!mem_limit) {

    SAYF("\n" cLRD "[-] " cRST

         "Hmm, looks like the target binary terminated before we could complete a\n"

         "    handshake with the injected code. Perhaps there is a horrible bug in the\n"

         "    fuzzer. Poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

  } else {

    SAYF("\n" cLRD "[-] " cRST

         "Hmm, looks like the target binary terminated before we could complete a\n"

         "    handshake with the injected code. There are %s probable explanations:\n\n"

         "%s"

         "    - The current memory limit (%s) is too restrictive, causing an OOM\n"

         "      fault in the dynamic linker. This can be fixed with the -m option. A\n"

         "      simple way to confirm the diagnosis may be:\n\n"

#ifdef RLIMIT_AS

         "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"

#else

         "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"

#endif /* ^RLIMIT_AS */

         "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"

         "      estimate the required amount of virtual memory for the binary.\n\n"

         "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"

         "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",

         getenv(DEFER_ENV_VAR) ? "three" : "two",

         getenv(DEFER_ENV_VAR) ?

         "    - You are using deferred forkserver, but __AFL_INIT() is never\n"

         "      reached before the program terminates.\n\n" : "",

         DMS(mem_limit << 20), mem_limit - 1);

  }

  FATAL("Fork server handshake failed");

}


static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {

  static u8 tmp[64];

  static u8 clean_trace[MAP_SIZE];

  u8  needs_write = 0, fault = 0;

  u32 trim_exec = 0;

  u32 remove_len;

  u32 len_p2;

  /* Although the trimmer will be less useful when variable behavior is

     detected, it will still work to some extent, so we don't check for

     this. */

  if (q->len < 5) return 0;

  stage_name = tmp;

  bytes_trim_in += q->len;

  /* Select initial chunk len, starting with large steps. */

  len_p2 = next_p2(q->len);

  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

  /* Continue until the number of steps gets too high or the stepover

     gets too small. */

  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {

    u32 remove_pos = remove_len;

    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));

    stage_cur = 0;

    stage_max = q->len / remove_len;

    while (remove_pos < q->len) {

      u32 trim_avail = MIN(remove_len, q->len - remove_pos);

      u32 cksum;

      write_with_gap(in_buf, q->len, remove_pos, trim_avail);

      fault = run_target(argv, exec_tmout);

      trim_execs++;

      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;

      /* Note that we don't keep track of crashes or hangs here; maybe TODO? */

      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

      /* If the deletion had no impact on the trace, make it permanent. This

         isn't perfect for variable-path inputs, but we're just making a

         best-effort pass, so it's not a big deal if we end up with false

         negatives every now and then. */

      if (cksum == q->exec_cksum) {

        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;

        len_p2  = next_p2(q->len);

        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,

                move_tail);

        /* Let's save a clean trace, which will be needed by

           update_bitmap_score once we're done with the trimming stuff. */

        if (!needs_write) {

          needs_write = 1;

          memcpy(clean_trace, trace_bits, MAP_SIZE);

        }

      } else remove_pos += remove_len;

      /* Since this can be slow, update the screen every now and then. */

      if (!(trim_exec++ % stats_update_freq)) show_stats();

      stage_cur++;

    }

    remove_len >>= 1;

  }

  /* If we have made changes to in_buf, we also need to update the on-disk

     version of the test case. */

  if (needs_write) {

    s32 fd;

    unlink(q->fname); /* ignore errors */

    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);

    ck_write(fd, in_buf, q->len, q->fname);

    close(fd);

    memcpy(trace_bits, clean_trace, MAP_SIZE);

    update_bitmap_score(q);

  }

abort_trimming:

  bytes_trim_out += q->len;

  return fault;

}


if (!a_extras_cnt) goto skip_extras;

stage_name  = "auto extras (over)";

stage_short = "ext_AO";

stage_cur   = 0;

stage_max   = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;

stage_val_type = STAGE_VAL_NONE;

orig_hit_cnt = new_hit_cnt;

for (i = 0; i < len; i++) {

  u32 last_len = 0;

  stage_cur_byte = i;

  for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) {

    /* See the comment in the earlier code; extras are sorted by size. */

    if (a_extras[j].len > len - i ||

        !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||

        !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {

      stage_max--;

      continue;

    }

    last_len = a_extras[j].len;

    memcpy(out_buf + i, a_extras[j].data, last_len);

    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;

    stage_cur++;

  }

  /* Restore all the clobbered memory. */

  memcpy(out_buf + i, in_buf + i, last_len);

}

new_hit_cnt = queued_paths + unique_crashes;

stage_finds[STAGE_EXTRAS_AO]  += new_hit_cnt - orig_hit_cnt;

stage_cycles[STAGE_EXTRAS_AO] += stage_max;


for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

  u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));

  stage_cur_val = use_stacking;

  for (i = 0; i < use_stacking; i++) {

    switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {

      case 0:

        /* Flip a single bit somewhere. Spooky! */

        FLIP_BIT(out_buf, UR(temp_len << 3));

        break;

      case 1:

        /* Set byte to interesting value. */

        out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];

        break;

      case 2:

        /* Set word to interesting value, randomly choosing endian. */

        if (temp_len < 2) break;

        if (UR(2)) {

          *(u16*)(out_buf + UR(temp_len - 1)) =

            interesting_16[UR(sizeof(interesting_16) >> 1)];

        } else {

          *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(

            interesting_16[UR(sizeof(interesting_16) >> 1)]);

        }

        break;

      case 3:

        /* Set dword to interesting value, randomly choosing endian. */

        if (temp_len < 4) break;

        if (UR(2)) {

          *(u32*)(out_buf + UR(temp_len - 3)) =

            interesting_32[UR(sizeof(interesting_32) >> 2)];

        } else {

          *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(

            interesting_32[UR(sizeof(interesting_32) >> 2)]);

        }

        break;

      case 4:

        /* Randomly subtract from byte. */

        out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);

        break;

      case 5:

        /* Randomly add to byte. */

        out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);

        break;

      case 6:

        /* Randomly subtract from word, random endian. */

        if (temp_len < 2) break;

        if (UR(2)) {

          u32 pos = UR(temp_len - 1);

          *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);

        } else {

          u32 pos = UR(temp_len - 1);

          u16 num = 1 + UR(ARITH_MAX);

          *(u16*)(out_buf + pos) =

            SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);

        }

        break;

      case 7:

        /* Randomly add to word, random endian. */

        if (temp_len < 2) break;

        if (UR(2)) {

          u32 pos = UR(temp_len - 1);

          *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);

        } else {

          u32 pos = UR(temp_len - 1);

          u16 num = 1 + UR(ARITH_MAX);

          *(u16*)(out_buf + pos) =

            SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);

        }

        break;

      case 8:

        /* Randomly subtract from dword, random endian. */

        if (temp_len < 4) break;

        if (UR(2)) {

          u32 pos = UR(temp_len - 3);

          *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);

        } else {

          u32 pos = UR(temp_len - 3);

          u32 num = 1 + UR(ARITH_MAX);

          *(u32*)(out_buf + pos) =

            SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);

        }

        break;

      case 9:

        /* Randomly add to dword, random endian. */

        if (temp_len < 4) break;

        if (UR(2)) {

          u32 pos = UR(temp_len - 3);

          *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);

        } else {

          u32 pos = UR(temp_len - 3);

          u32 num = 1 + UR(ARITH_MAX);

          *(u32*)(out_buf + pos) =

            SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);

        }

        break;

      case 10:

        /* Just set a random byte to a random value. Because,

           why not. We use XOR with 1-255 to eliminate the

           possibility of a no-op. */

        out_buf[UR(temp_len)] ^= 1 + UR(255);

        break;

      case 11 ... 12: {

          /* Delete bytes. We're making this a bit more likely

             than insertion (the next option) in hopes of keeping

             files reasonably small. */

          u32 del_from, del_len;

          if (temp_len < 2) break;

          /* Don't delete too much. */

          del_len = choose_block_len(temp_len - 1);

          del_from = UR(temp_len - del_len + 1);

          memmove(out_buf + del_from, out_buf + del_from + del_len,

                  temp_len - del_from - del_len);

          temp_len -= del_len;

          break;

        }

      case 13:

        if (temp_len + HAVOC_BLK_XL < MAX_FILE) {

          /* Clone bytes (75%) or insert a block of constant bytes (25%). */

          u8  actually_clone = UR(4);

          u32 clone_from, clone_to, clone_len;

          u8* new_buf;

          if (actually_clone) {

            clone_len  = choose_block_len(temp_len);

            clone_from = UR(temp_len - clone_len + 1);

          } else {

            clone_len = choose_block_len(HAVOC_BLK_XL);

            clone_from = 0;

          }

          clone_to   = UR(temp_len);

          new_buf = ck_alloc_nozero(temp_len + clone_len);

          /* Head */

          memcpy(new_buf, out_buf, clone_to);

          /* Inserted part */

          if (actually_clone)

            memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);

          else

            memset(new_buf + clone_to,

                   UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);

          /* Tail */

          memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,

                 temp_len - clone_to);

          ck_free(out_buf);

          out_buf = new_buf;

          temp_len += clone_len;

        }

        break;

      case 14: {

          /* Overwrite bytes with a randomly selected chunk (75%) or fixed

             bytes (25%). */

          u32 copy_from, copy_to, copy_len;

          if (temp_len < 2) break;

          copy_len  = choose_block_len(temp_len - 1);

          copy_from = UR(temp_len - copy_len + 1);

          copy_to   = UR(temp_len - copy_len + 1);

          if (UR(4)) {

            if (copy_from != copy_to)

              memmove(out_buf + copy_to, out_buf + copy_from, copy_len);

          } else memset(out_buf + copy_to,

                        UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);

          break;

        }

      /* Values 15 and 16 can be selected only if there are any extras

         present in the dictionaries. */

      case 15: {

          /* Overwrite bytes with an extra. */

          if (!extras_cnt || (a_extras_cnt && UR(2))) {

            /* No user-specified extras or odds in our favor. Let's use an

               auto-detected one. */

            u32 use_extra = UR(a_extras_cnt);

            u32 extra_len = a_extras[use_extra].len;

            u32 insert_at;

            if (extra_len > temp_len) break;

            insert_at = UR(temp_len - extra_len + 1);

            memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);

          } else {

            /* No auto extras or odds in our favor. Use the dictionary. */

            u32 use_extra = UR(extras_cnt);

            u32 extra_len = extras[use_extra].len;

            u32 insert_at;

            if (extra_len > temp_len) break;

            insert_at = UR(temp_len - extra_len + 1);

            memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);

          }

          break;

        }

      case 16: {

          u32 use_extra, extra_len, insert_at = UR(temp_len + 1);

          u8* new_buf;

          /* Insert an extra. Do the same dice-rolling stuff as for the

             previous case. */

          if (!extras_cnt || (a_extras_cnt && UR(2))) {

            use_extra = UR(a_extras_cnt);

            extra_len = a_extras[use_extra].len;

            if (temp_len + extra_len >= MAX_FILE) break;

            new_buf = ck_alloc_nozero(temp_len + extra_len);

            /* Head */

            memcpy(new_buf, out_buf, insert_at);

            /* Inserted part */

            memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);

          } else {

            use_extra = UR(extras_cnt);

            extra_len = extras[use_extra].len;

            if (temp_len + extra_len >= MAX_FILE) break;

            new_buf = ck_alloc_nozero(temp_len + extra_len);

            /* Head */

            memcpy(new_buf, out_buf, insert_at);

            /* Inserted part */

            memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);

          }

          /* Tail */

          memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,

                 temp_len - insert_at);

          ck_free(out_buf);

          out_buf   = new_buf;

          temp_len += extra_len;

          break;

        }

    }

  }

  if (common_fuzz_stuff(argv, out_buf, temp_len))

    goto abandon_entry;

  /* out_buf might have been mangled a bit, so let's restore it to its

     original size and shape. */

  if (temp_len < len) out_buf = ck_realloc(out_buf, len);

  temp_len = len;

  memcpy(out_buf, in_buf, len);

  /* If we're finding new stuff, let's run for a bit longer, limits

     permitting. */

  if (queued_paths != havoc_queued) {

    if (perf_score <= HAVOC_MAX_MULT * 100) {

      stage_max  *= 2;

      perf_score *= 2;

    }

    havoc_queued = queued_paths;

  }

}

In the final stage, the fuzzer randomly picks another input from the queue, chooses a suitable offset within the current input and the chosen one, splices the two together at that point, and then re-enters the RANDOM HAVOC stage with the result.


  /************

   * SPLICING *

   ************/

  /* This is a last-resort strategy triggered by a full round with no findings.

     It takes the current input file, randomly selects another input, and

     splices them together at some offset, then relies on the havoc

     code to mutate that blob. */

retry_splicing:

  if (use_splicing && splice_cycle++ < SPLICE_CYCLES &&

      queued_paths > 1 && queue_cur->len > 1) {

    struct queue_entry* target;

    u32 tid, split_at;

    u8* new_buf;

    s32 f_diff, l_diff;

    /* First of all, if we've modified in_buf for havoc, let's clean that

       up... */

    if (in_buf != orig_in) {

      ck_free(in_buf);

      in_buf = orig_in;

      len = queue_cur->len;

    }

    /* Pick a random queue entry and seek to it. Don't splice with yourself. */

    do { tid = UR(queued_paths); } while (tid == current_entry);

    splicing_with = tid;

    target = queue;

    while (tid >= 100) { target = target->next_100; tid -= 100; }

    while (tid--) target = target->next;

    /* Make sure that the target has a reasonable length. */

    while (target && (target->len < 2 || target == queue_cur)) {

      target = target->next;

      splicing_with++;

    }

    if (!target) goto retry_splicing;

    /* Read the testcase into a new buffer. */

    fd = open(target->fname, O_RDONLY);

    if (fd < 0) PFATAL("Unable to open '%s'", target->fname);

    new_buf = ck_alloc_nozero(target->len);

    ck_read(fd, new_buf, target->len, target->fname);

    close(fd);

    /* Find a suitable splicing location, somewhere between the first and

       the last differing byte. Bail out if the difference is just a single

       byte or so. */

    locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);

    if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {

      ck_free(new_buf);

      goto retry_splicing;

    }

    /* Split somewhere between the first and last differing byte. */

    split_at = f_diff + UR(l_diff - f_diff);

    /* Do the thing. */

    len = target->len;

    memcpy(new_buf, in_buf, split_at);

    in_buf = new_buf;

    ck_free(out_buf);

    out_buf = ck_alloc_nozero(len);

    memcpy(out_buf, in_buf, len);

    goto havoc_stage;

  }

To sum up, this function (sync_fuzzers) first lists the other fuzzer directories, then reads the cases in each of their queue/ folders and runs them one by one. If a case triggers a new path it is saved into our own queue, and the id of the last synced case is written to .synced/<other fuzzer's directory name> so those cases are not run again next time.
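
A rough sketch of that directory walk (error handling omitted; the real sync_fuzzers() also parses the numeric ids out of the id:NNNNNN file names and pushes every imported case through the usual run-and-save-if-interesting path):

/* Enumerate sibling fuzzer directories under out_dir and visit their queue/
   folders; my_id is this fuzzer's own -M/-S name. */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

static void sync_sketch(const char* out_dir, const char* my_id) {
  DIR* d = opendir(out_dir);
  struct dirent* e;
  char qdir[4096];

  if (!d) return;

  while ((e = readdir(d))) {
    if (e->d_name[0] == '.' || !strcmp(e->d_name, my_id)) continue;
    snprintf(qdir, sizeof(qdir), "%s/%s/queue", out_dir, e->d_name);
    /* ...open qdir, skip ids at or below the value stored in
       .synced/<that fuzzer's name>, run the rest, keep new-path finds... */
  }
  closedir(d);
}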

The injected trampoline first saves the registers that are about to be clobbered, then calls __afl_maybe_log to record the edge that was just hit. That function is likewise written in assembly, but we can decompile it with other tools:


char __fastcall _afl_maybe_log(__int64 a1, __int64 a2, __int64 a3, __int64 a4)

{

  char v4; // of

  char v5; // al

  __int64 v6; // rdx

  __int64 v7; // rcx

  char *v9; // rax

  int v10; // eax

  void *v11; // rax

  int v12; // edi

  __int64 v13; // rax

  __int64 v14; // rax

  __int64 v15; // [rsp-10h] [rbp-180h]

  char v16; // [rsp+10h] [rbp-160h]

  __int64 v17; // [rsp+18h] [rbp-158h]

  v5 = v4;

  v6 = _afl_area_ptr;

  if ( !_afl_area_ptr )

  {

    if ( _afl_setup_failure )

      return v5 + 127;

    v6 = _afl_global_area_ptr;

    if ( _afl_global_area_ptr )

    {

      _afl_area_ptr = _afl_global_area_ptr;

    }

    else

    {

      v16 = v4;

      v17 = a4;

      v9 = getenv("__AFL_SHM_ID");

      if ( !v9 || (v10 = atoi(v9), v11 = shmat(v10, 0LL, 0), v11 == -1LL) )

      {

        ++_afl_setup_failure;

        v5 = v16;

        return v5 + 127;

      }

      _afl_area_ptr = v11;

      _afl_global_area_ptr = v11;

      v15 = v11;

      if ( write(199, &_afl_temp, 4uLL) == 4 )

      {

        while ( 1 )

        {

          v12 = 198;

          if ( read(198, &_afl_temp, 4uLL) != 4 )

            break;

          LODWORD(v13) = fork();

          if ( v13 < 0 )

            break;

          if ( !v13 )

            goto __afl_fork_resume;

          _afl_fork_pid = v13;

          write(199, &_afl_fork_pid, 4uLL);

          v12 = _afl_fork_pid;

          LODWORD(v14) = waitpid(_afl_fork_pid, &_afl_temp, 0);

          if ( v14 <= 0 )

            break;

          write(199, &_afl_temp, 4uLL);

        }

        _exit(v12);

      }

__afl_fork_resume:

      close(198);

      close(199);

      v6 = v15;

      v5 = v16;

      a4 = v17;

    }

  }

  v7 = _afl_prev_loc ^ a4;

  _afl_prev_loc ^= v7;

  _afl_prev_loc = _afl_prev_loc >> 1;

  ++*(v6 + v7);

  return v5 + 127;

}

Here v6 is the address of the shared memory and a4 is cur_location, so v7 = cur_location ^ prev_location. This value is used as an index, and the byte at that offset in the shared memory is incremented; the fuzzer side can then inspect this map to tell whether a new path has been reached.

Also, the point of _afl_prev_loc = _afl_prev_loc >> 1; is to keep A->A from colliding with B->B, and A->B from colliding with B->A, which would otherwise be recorded as the same edge.
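
Rewritten as plain C, the bookkeeping from the decompiled function boils down to the following sketch (cur_loc is the random block id that afl-as bakes into each trampoline):

/* Per-edge update performed by __afl_maybe_log, expressed in C. */
static unsigned char* __afl_area_ptr;   /* set to the shmat() result at startup */
static unsigned short __afl_prev_loc;   /* id of the previously hit block       */

static inline void afl_log_edge(unsigned short cur_loc) {  /* cur_loc < MAP_SIZE */
  __afl_area_ptr[cur_loc ^ __afl_prev_loc]++;   /* bump this edge's counter      */
  __afl_prev_loc = cur_loc >> 1;                /* so A->B and B->A map apart    */
}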


Source: https://bbs.pediy.com/thread-276769.htm