[infer] fix inconsistencies between the errors that are saved in the generated reports and what is printed to standard output

master
jrm 10 years ago
parent 8f9cef0320
commit aa7bf8e69b

@ -64,6 +64,9 @@ def prepare_build(args):
if args.debug:
inferJ_options.append('--debug')
if args.no_filtering:
inferJ_options.append('--no-filtering')
if args.analyzer_mode:
inferJ_options.append('--analyzer_mode')
inferJ_options.append(args.analyzer_mode)
@ -428,7 +431,7 @@ def collect_results(args, start_time):
utils.create_json_report(args.infer_out)
print('\n')
inferlib.print_errors(csv_report, bugs_out, args.analyzer)
inferlib.print_errors(csv_report, bugs_out)
stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))

@ -8,7 +8,6 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import inferlib

@ -41,6 +41,10 @@ MODES = [COMPILE, CAPTURE, INFER, ERADICATE, CHECKERS, TRACING]
INFER_ANALYZE_BINARY = "InferAnalyze"
ERROR = 'ERROR'
WARNING = 'WARNING'
INFO = 'INFO'
class AbsolutePathAction(argparse.Action):
"""Convert a path from relative to absolute in the arg parser"""
def __call__(self, parser, namespace, values, option_string=None):
@ -85,6 +89,10 @@ base_group.add_argument('-a', '--analyzer',
base_group.add_argument('-m', '--analyzer_mode', metavar='<analyzer_mode>',
help='''Select a special analyzer mode such as
graphql1 or graphql2''')
base_group.add_argument('-nf', '--no-filtering', action='store_true',
help='''Also show the results from the experimental
checks. Warning: some checks may contain many false
alarms''')
base_parser.add_argument('-v', '--version', help='Get version of the analyzer',
action=VersionAction)
@ -190,31 +198,7 @@ def compare_rows(row_1, row_2):
return line_1 - line_2
def sort_csv(csv_report, infer_out):
collected_rows = []
with open(csv_report, 'r') as file_in:
reader = csv.reader(file_in)
rows = [row for row in reader]
if len(rows) <= 1:
return rows
else:
for row in rows[1:]:
filename = row[utils.CSV_INDEX_FILENAME]
if os.path.isfile(filename):
collected_rows.append(row)
collected_rows = sorted(
collected_rows,
cmp=compare_rows)
collected_rows = [rows[0]] + collected_rows
temporary_file = tempfile.mktemp()
with open(temporary_file, 'w') as file_out:
writer = csv.writer(file_out)
writer.writerows(collected_rows)
file_out.flush()
shutil.move(temporary_file, csv_report)
def should_print(analyzer, row):
def should_report(analyzer, row):
error_kind = row[utils.CSV_INDEX_KIND]
error_type = row[utils.CSV_INDEX_TYPE]
error_bucket = '' # can be updated later once we extract it from qualifier
@ -229,7 +213,7 @@ def should_print(analyzer, row):
pass # this will skip any invalid xmls
# config what to print is listed below
error_kinds = ['ERROR', 'WARNING']
error_kinds = [ERROR, WARNING]
null_style_bugs = [
'NULL_DEREFERENCE',
@ -241,17 +225,13 @@ def should_print(analyzer, row):
other_bugs = ['RESOURCE_LEAK', 'MEMORY_LEAK', 'RETAIN_CYCLE']
filter_by_type = True
if analyzer in [ERADICATE, CHECKERS]:
# report all issues for eredicate and checkers
filter_by_type = False
if analyzer in [ERADICATE, CHECKERS, TRACING]:
# report all issues for eradicate and checkers
return True
if error_kind not in error_kinds:
return False
if not filter_by_type:
return True
if not error_type:
return False
@ -266,6 +246,31 @@ def should_print(analyzer, row):
return False
def clean_csv(args, csv_report):
collected_rows = []
with open(csv_report, 'r') as file_in:
reader = csv.reader(file_in)
rows = [row for row in reader]
if len(rows) <= 1:
return rows
else:
for row in rows[1:]:
filename = row[utils.CSV_INDEX_FILENAME]
if os.path.isfile(filename):
if args.no_filtering or should_report(args.analyzer, row):
collected_rows.append(row)
collected_rows = sorted(
collected_rows,
cmp=compare_rows)
collected_rows = [rows[0]] + collected_rows
temporary_file = tempfile.mktemp()
with open(temporary_file, 'w') as file_out:
writer = csv.writer(file_out)
writer.writerows(collected_rows)
file_out.flush()
shutil.move(temporary_file, csv_report)
def remove_bucket(bug_message):
""" Remove anything from the beginning if the message that
looks like a bucket """
@ -277,15 +282,16 @@ def print_and_write(file_out, message):
file_out.write(message + '\n')
def print_errors(csv_report, bugs_out, analyzer):
def print_errors(csv_report, bugs_out):
with open(csv_report, 'r') as file_in:
reader = csv.reader(file_in)
reader.next() # first line is header, skip it
errors = filter(
lambda row: should_print(analyzer, row),
lambda row: row[utils.CSV_INDEX_KIND] in [ERROR, WARNING],
reader
)
with open(bugs_out, 'w') as file_out:
if not errors:
print_and_write(file_out, 'No issues found')
@ -360,6 +366,7 @@ class Infer:
self.stats = {'int': {}, 'float': {}}
self.timing = {}
def clean_exit(self):
if os.path.isdir(self.args.infer_out):
print('removing', self.args.infer_out)
@ -578,13 +585,13 @@ class Infer:
logging.error('Error with InferPrint with the command: '
+ infer_print_cmd)
else:
sort_csv(csv_report, self.args.infer_out)
clean_csv(self.args, csv_report)
self.update_stats(csv_report)
utils.create_json_report(self.args.infer_out)
print('\n')
if not self.args.buck:
print_errors(csv_report, bugs_out, self.args.analyzer)
print_errors(csv_report, bugs_out)
return exit_status

@ -31,6 +31,8 @@ class BuckAnalyzer:
capture_cmd = [utils.get_cmd_in_bin_dir('BuckAnalyze')]
if self.args.debug:
capture_cmd.append('-g')
if self.args.no_filtering:
capture_cmd.append('--no-filtering')
capture_cmd += self.cmd
capture_cmd += ['--analyzer', self.args.analyzer]
try:

@ -31,6 +31,8 @@ class JavacCapture:
capture_cmd = [utils.get_cmd_in_bin_dir('inferJ')]
capture_cmd += ['--out', self.args.infer_out]
capture_cmd += ['--analyzer', self.args.analyzer]
if self.args.no_filtering:
capture_cmd.append('--no-filtering')
if self.args.debug:
capture_cmd.append('-g')
capture_cmd += self.cmd

@ -5,7 +5,14 @@ out = 'out'
clean_cmd = ' '.join(['rm', '-rf', out])
env_cmd = ' '.join(['export', 'REPORT_ASSERTION_FAILURE=1'])
infer_cmd = ' '.join(['infer', '-o', 'out', '--testing_mode', '--', 'make'])
infer_cmd = ' '.join([
'infer',
'--no-filtering',
'-o', 'out',
'--testing_mode',
'--',
'make'
])
copy_cmd = ' '.join(['cp', out + '/report.csv', '$OUT'])
command = ' && '.join([clean_cmd, env_cmd, infer_cmd, copy_cmd])

@ -22,6 +22,7 @@ def analysis_cmd(analyzer):
classpath = ':'.join([('$(classpath ' + path + ')') for path in dependencies])
infer_cmd = ' '.join([
'infer',
'--no-filtering',
'-o', out,
'-a', analyzer,
'--',

@ -83,6 +83,7 @@ public class InferRunner {
return new ImmutableList.Builder<String>()
.add("infer")
.add("--no-filtering")
.addAll(args)
.add("-o")
.add(resultsDirName)
@ -222,6 +223,7 @@ public class InferRunner {
.add(ml_buckets == null ? "all" : ml_buckets);
ImmutableList<String> inferCmd = new ImmutableList.Builder<String>()
.add("infer")
.add("--no-filtering")
.add("--out")
.add(resultsDirName)
.add("--testing_mode")

Loading…
Cancel
Save