exit with correct error code when infer.py fails

Summary:
The error code was always 1, and the failure check was only reached in
crashcontext mode because of a typo: a trailing semicolon made the check part of
the crashcontext match branch.
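
For illustration only (this sketch is not the infer source; stacktrace, collect
and status are placeholder names), the precedence gotcha and the fixed shape
look roughly like this:

(* Buggy shape: the semicolon sequences the status check into the [Some s]
   branch, so it only ever runs in crashcontext mode. *)
let report_failure_with_typo analysis_is_crashcontext stacktrace collect status =
  if analysis_is_crashcontext then
    match stacktrace with
    | None -> ()
    | Some s ->
      collect s;
      (* still inside the [Some s] branch of the match *)
      if status <> 0 then exit 1

(* Fixed shape: parenthesize the crashcontext block, then check the status
   unconditionally and exit with the child's real code instead of 1. *)
let report_failure_fixed analysis_is_crashcontext stacktrace collect status =
  (if analysis_is_crashcontext then
     match stacktrace with
     | None -> ()
     | Some s -> collect s);
  if status <> 0 then exit status

With the change below, the wrapper maps Unix.WEXITED i to i (and anything else
to 1), so infer.py's own exit code, such as 2 when --fail-on-issue finds bugs,
reaches the caller.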

Reviewed By: sblackshear, lazaroclapp

Differential Revision: D3735661

fbshipit-source-id: c0bb0f5
master
Authored by Jules Villard, committed by Facebook Github Bot 8
parent 90c8f55c32
commit dda4921786

@@ -111,33 +111,37 @@ let () =
   let analysis_is_crashcontext = match Config.analyzer with
     | Some Crashcontext -> true
     | _ -> false in
-  if analysis_is_crashcontext then
+  (if analysis_is_crashcontext then
     (* check whether this is the top-level infer process *)
     let top_level_infer =
-      (* if the '--buck' option was passed, then this is the top level process iff the build command
-         starts with 'buck' *)
+      (* if the '--buck' option was passed, then this is the top level process iff the build
+         command starts with 'buck' *)
       if Config.buck then buck
       (* otherwise, we assume javac as the build command and thus only one process *)
       else true in
     if top_level_infer then
       (* if we are the top-level process, then find the output directory and collect all
          crashcontext summaries under it in a single crashcontext.json file *)
       let root_out_dir = if buck then begin
           let project_root = match Config.project_root with
             | Some root -> root
             | None -> Filename.dirname Config.results_dir in
           let buck_out = match Config.buck_out with
             | Some dir -> dir
             | None -> "buck-out" in
           Filename.concat project_root buck_out
         end
         else Config.results_dir in
       match Config.stacktrace with
       | None -> failwith "Detected -a crashcontext without --stacktrace, \
                           this should have been checked earlier."
-      | Some s -> Crashcontext.collect_all_summaries root_out_dir s;
-  if status <> Unix.WEXITED 0 then (
-    prerr_endline ("Failed to execute: " ^ (String.concat " " (Array.to_list args_py))) ;
-    exit 1
+      | Some s -> Crashcontext.collect_all_summaries root_out_dir s
   );
-  ()
+  let exit_code = match status with
+    | Unix.WEXITED i -> i
+    | _ -> 1 in
+  if exit_code <> 0 then (
+    if not (exit_code = 2 && Config.fail_on_bug) then
+      prerr_endline ("Failed to execute: " ^ (String.concat " " (Array.to_list args_py))) ;
+    exit exit_code
+  )
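
As a standalone reference (not part of this commit), the same
propagate-the-child's-exit-code pattern can be sketched with OCaml's Unix
library; the file name and the child command below are made up for the example:

(* propagate_exit.ml -- hypothetical standalone example.
   Build with: ocamlfind ocamlopt -package unix -linkpkg propagate_exit.ml *)
let () =
  (* a made-up child command that fails with exit code 2 *)
  let args = [| "sh"; "-c"; "exit 2" |] in
  let pid = Unix.create_process args.(0) args Unix.stdin Unix.stdout Unix.stderr in
  let (_, status) = Unix.waitpid [] pid in
  (* map the process status to an exit code; treat signals as a generic failure *)
  let exit_code = match status with
    | Unix.WEXITED i -> i
    | Unix.WSIGNALED _ | Unix.WSTOPPED _ -> 1 in
  if exit_code <> 0 then (
    prerr_endline ("Failed to execute: " ^ String.concat " " (Array.to_list args));
    exit exit_code
  )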

@ -62,6 +62,8 @@ ALL_TESTS = [
'buck', 'buck',
'cc1', 'cc1',
'cmake', 'cmake',
'componentkit',
'fail',
'gradle', 'gradle',
'javac', 'javac',
'locale', 'locale',
@@ -114,7 +116,7 @@ def save_report(reports, filename):
                   separators=(',', ': '), sort_keys=True)


-def run_analysis(clean_cmds, build_cmds, extra_check, env=None):
+def run_analysis(clean_cmds, build_cmds, extra_check, should_fail, env=None):
     for clean_cmd in clean_cmds:
         subprocess.check_call(clean_cmd, env=env)
@@ -134,7 +136,16 @@ def run_analysis(clean_cmds, build_cmds, extra_check, env=None):
                 mode='w',
                 suffix='.out',
                 prefix='analysis_') as analysis_output:
-            subprocess.check_call(infer_cmd, stdout=analysis_output, env=env)
+            try:
+                subprocess.check_call(infer_cmd,
+                                      stdout=analysis_output, env=env)
+                if should_fail is not None:
+                    # hacky since we should clean up infer-out, etc. as below
+                    # if you made the test fails, this is your punishment
+                    assert False
+            except subprocess.CalledProcessError, exn:
+                if exn.returncode != should_fail:
+                    raise

         json_path = os.path.join(temp_out_dir, REPORT_JSON)
         found_errors = utils.load_json_from_path(json_path)
@ -228,6 +239,7 @@ def test(name,
enabled=None, enabled=None,
report_fname=None, report_fname=None,
extra_check=lambda x: None, extra_check=lambda x: None,
should_fail=None,
preprocess=lambda: None, preprocess=lambda: None,
postprocess=lambda errors: errors): postprocess=lambda errors: errors):
"""Run a test. """Run a test.
@@ -244,6 +256,10 @@ def test(name,
     - [enabled] whether the test should attempt to run. By default it
       is enabled if [[name] in [to_test]]
     - [report_fname] where to find the expected Infer results
+    - [extra_check] some function that will be given the temporary
+      results directory as argument
+    - [should_fail] if not None then running infer is expected to fail
+      with [should_fail] error code
     - [preprocess] a function to run before the clean and compile
       commands. If the function returns something non-None, use that as
       the compile commands.
@@ -251,6 +267,7 @@ def test(name,
       modify them. It must return an Infer report.
     Returns [True] if the test ran, [False] otherwise.
     """
+
     # python can't into using values of arguments in the default
     # values of other arguments
@@ -282,6 +299,7 @@ def test(name,
                          clean_commands,
                          compile_commands,
                          extra_check=extra_check,
+                         should_fail=should_fail,
                          env=env)
     original = os.path.join(EXPECTED_OUTPUTS_DIR, report_fname)
     do_test(postprocess(errors), original)
@@ -481,6 +499,13 @@ class BuildIntegrationTest(unittest.TestCase):
                'TestIgnoreImports.mm'],
               'infer_args': ['--cxx', '--no-filtering']}])

+    def test_fail_on_issue(self):
+        test('fail', '--fail-on-issue flag',
+             CODETOANALYZE_DIR,
+             [{'compile': ['clang', '-c', 'hello.c'],
+               'infer_args': ['--fail-on-issue']}],
+             should_fail=2)
+
     def test_pmd_xml_output(self):
         def pmd_check(infer_out):
             assert os.path.exists(os.path.join(infer_out, 'report.xml'))

@@ -0,0 +1,7 @@
+[
+  {
+    "bug_type": "NULL_DEREFERENCE",
+    "file": "hello.c",
+    "procedure": "test"
+  }
+]