move code about infer issues formats to issues.py

Reviewed By: jeremydubreil

Differential Revision: D2642057

fb-gh-sync-id: d22c62f
Jules Villard (committed by facebook-github-bot-7)
parent a416afeb44
commit 88b615d8ab

@@ -30,7 +30,7 @@ import time
 import traceback
 import zipfile
-from inferlib import analyze, utils
+from inferlib import analyze, issues, utils
 
 ANALYSIS_SUMMARY_OUTPUT = 'analysis_summary.txt'
@@ -448,7 +448,7 @@ def collect_results(args, start_time):
     report.flush()
     print('\n')
-    analyze.print_errors(json_report, bugs_out)
+    issues.print_errors(json_report, bugs_out)
     stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))

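The call-site change above is behavior-preserving: `issues.print_errors` is the same function that previously lived in `analyze`. A minimal smoke-test sketch, assuming the `inferlib` package from this revision is importable (file names below are illustrative, not from the diff):

```python
import json
import tempfile

from inferlib import issues

# An empty JSON report should yield the 'No issues found' message,
# exactly as analyze.print_errors did before the move.
with tempfile.NamedTemporaryFile(mode='w', suffix='.json',
                                 delete=False) as report:
    json.dump([], report)

bugs_out = report.name + '.bugs.txt'
issues.print_errors(report.name, bugs_out)
with open(bugs_out) as f:
    assert f.read().strip() == 'No issues found'
```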
@@ -21,9 +21,7 @@ import os
 import shutil
 import subprocess
 import sys
-import tempfile
 import time
-import xml.etree.ElementTree as ET
 
 from . import config, issues, jwlib, utils
@@ -227,180 +225,6 @@ def help_exit(message):
     exit(1)
 
 
-def compare_csv_rows(row_1, row_2):
-    filename_1 = row_1[utils.CSV_INDEX_FILENAME]
-    filename_2 = row_2[utils.CSV_INDEX_FILENAME]
-    if filename_1 < filename_2:
-        return -1
-    elif filename_1 > filename_2:
-        return 1
-    else:
-        line_1 = int(row_1[utils.CSV_INDEX_LINE])
-        line_2 = int(row_2[utils.CSV_INDEX_LINE])
-        return line_1 - line_2
-
-
-def compare_json_rows(row_1, row_2):
-    filename_1 = row_1[utils.JSON_INDEX_FILENAME]
-    filename_2 = row_2[utils.JSON_INDEX_FILENAME]
-    if filename_1 < filename_2:
-        return -1
-    elif filename_1 > filename_2:
-        return 1
-    else:
-        line_1 = int(row_1[utils.JSON_INDEX_LINE])
-        line_2 = int(row_2[utils.JSON_INDEX_LINE])
-        return line_1 - line_2
-
-
-def should_report(analyzer, error_kind, error_type, error_bucket):
-    analyzers_whitelist = [
-        config.ANALYZER_ERADICATE,
-        config.ANALYZER_CHECKERS,
-        config.ANALYZER_TRACING,
-    ]
-    error_kinds = [issues.ISSUE_KIND_ERROR, issues.ISSUE_KIND_WARNING]
-    null_style_buckets = ['B1', 'B2']
-
-    if analyzer in analyzers_whitelist:
-        return True
-
-    if error_kind not in error_kinds:
-        return False
-
-    if not error_type:
-        return False
-
-    if error_type in issues.NULL_STYLE_ISSUE_TYPES:
-        return error_bucket in null_style_buckets
-
-    return error_type in issues.ISSUE_TYPES
-
-
-def should_report_csv(analyzer, row):
-    error_kind = row[utils.CSV_INDEX_KIND]
-    error_type = row[utils.CSV_INDEX_TYPE]
-    error_bucket = ''  # can be updated later once we extract it from qualifier
-
-    try:
-        qualifier_xml = ET.fromstring(row[utils.CSV_INDEX_QUALIFIER_TAGS])
-        if qualifier_xml.tag == utils.QUALIFIER_TAGS:
-            bucket = qualifier_xml.find(utils.BUCKET_TAGS)
-            if bucket is not None:
-                error_bucket = bucket.text
-    except ET.ParseError:
-        pass  # this will skip any invalid xmls
-
-    return should_report(analyzer, error_kind, error_type, error_bucket)
-
-
-def should_report_json(analyzer, row):
-    error_kind = row[utils.JSON_INDEX_KIND]
-    error_type = row[utils.JSON_INDEX_TYPE]
-    error_bucket = ''  # can be updated later once we extract it from qualifier
-
-    for qual_tag in row[utils.QUALIFIER_TAGS]:
-        if qual_tag['tag'] == utils.BUCKET_TAGS:
-            error_bucket = qual_tag['value']
-            break
-
-    return should_report(analyzer, error_kind, error_type, error_bucket)
-
-
-def clean_json(args, json_report):
-    collected_rows = []
-    with codecs.open(json_report, 'r', encoding=utils.LOCALE) as file_in:
-        rows = json.load(file_in)
-        for row in rows:
-            filename = row[utils.JSON_INDEX_FILENAME]
-            if os.path.isfile(filename):
-                if args.no_filtering or should_report_json(args.analyzer, row):
-                    collected_rows.append(row)
-        collected_rows = sorted(
-            collected_rows,
-            cmp=compare_json_rows)
-    temporary_file = tempfile.mktemp()
-    with codecs.open(temporary_file, 'w', encoding=utils.LOCALE) as file_out:
-        json.dump(collected_rows, file_out)
-        file_out.flush()
-        shutil.move(temporary_file, json_report)
-
-
-def clean_csv(args, csv_report):
-    collected_rows = []
-    with open(csv_report, 'r') as file_in:
-        reader = utils.locale_csv_reader(file_in)
-        rows = [row for row in reader]
-        if len(rows) <= 1:
-            return rows
-        else:
-            for row in rows[1:]:
-                filename = row[utils.CSV_INDEX_FILENAME]
-                if os.path.isfile(filename):
-                    if args.no_filtering \
-                            or should_report_csv(args.analyzer, row):
-                        collected_rows.append(row)
-            collected_rows = sorted(
-                collected_rows,
-                cmp=compare_csv_rows)
-            collected_rows = [rows[0]] + collected_rows
-    temporary_file = tempfile.mktemp()
-    with open(temporary_file, 'w') as file_out:
-        writer = csv.writer(file_out)
-        writer.writerows(collected_rows)
-        file_out.flush()
-        shutil.move(temporary_file, csv_report)
-
-
-def print_and_write(file_out, message):
-    print(message)
-    file_out.write(message + '\n')
-
-
-def print_errors(json_report, bugs_out):
-    with codecs.open(json_report, 'r', encoding=utils.LOCALE) as file_in:
-        errors = json.load(file_in)
-        errors = filter(lambda row: row[utils.JSON_INDEX_KIND] in
-                        [issues.ISSUE_KIND_ERROR, issues.ISSUE_KIND_WARNING],
-                        errors)
-    with codecs.open(bugs_out, 'w', encoding=utils.LOCALE) as file_out:
-        text_errors_list = []
-        for row in errors:
-            filename = row[utils.JSON_INDEX_FILENAME]
-            if os.path.isfile(filename):
-                kind = row[utils.JSON_INDEX_KIND]
-                line = row[utils.JSON_INDEX_LINE]
-                error_type = row[utils.JSON_INDEX_TYPE]
-                msg = row[utils.JSON_INDEX_QUALIFIER]
-                indenter = utils.Indenter()
-                indenter.indent_push()
-                indenter.add(
-                    utils.build_source_context(filename,
-                                               utils.TERMINAL_FORMATTER,
-                                               int(line)))
-                source_context = unicode(indenter)
-                text_errors_list.append(
-                    u'{0}:{1}: {2}: {3}\n {4}\n{5}'.format(
-                        filename,
-                        line,
-                        kind.lower(),
-                        error_type,
-                        msg,
-                        source_context,
-                    )
-                )
-        n_issues = len(text_errors_list)
-        if n_issues == 0:
-            print_and_write(file_out, 'No issues found')
-        else:
-            msg = '\nFound %s\n' % utils.get_plural('issue', n_issues)
-            print_and_write(file_out, msg)
-            text_errors = '\n\n'.join(text_errors_list)
-            print_and_write(file_out, text_errors)
-
-
 def run_command(cmd, debug_mode, javac_arguments, step, analyzer):
     if debug_mode:
         print('\n{0}\n'.format(' '.join(cmd)))
@@ -668,8 +492,8 @@ class Infer:
             logging.error('Error with InferPrint with the command: '
                           + infer_print_cmd)
         else:
-            clean_csv(self.args, csv_report)
-            clean_json(self.args, json_report)
+            issues.clean_csv(self.args, csv_report)
+            issues.clean_json(self.args, json_report)
             self.update_stats_with_warnings(csv_report)
 
         return exit_status
@@ -725,7 +549,7 @@ class Infer:
                                     utils.JSON_REPORT_FILENAME)
         bugs_out = os.path.join(self.args.infer_out,
                                 utils.BUGS_FILENAME)
-        print_errors(json_report, bugs_out)
+        issues.print_errors(json_report, bugs_out)
 
     def print_analysis_stats(self):
         procs_total = self.stats['int']['procedures']

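Both cleaners removed above (and re-added in issues.py below) follow the same write-then-move idiom: build the filtered, sorted rows, write them to a scratch file, then move the scratch file over the original report. A standalone sketch of that idiom, with a hypothetical `transform` callback and an illustrative file name:

```python
import json
import shutil
import tempfile

def rewrite_report_in_place(path, transform):
    # Read the report, transform it, write the result to a scratch file,
    # then replace the original -- the tempfile.mktemp() + shutil.move()
    # pattern used by clean_csv and clean_json.
    with open(path, 'r') as file_in:
        rows = json.load(file_in)
    scratch = tempfile.mktemp()
    with open(scratch, 'w') as file_out:
        json.dump(transform(rows), file_out)
    shutil.move(scratch, path)

# e.g. keep only ERROR-kind rows ('kind' is assumed to be the JSON key
# behind utils.JSON_INDEX_KIND)
rewrite_report_in_place('report.json',
                        lambda rows: [r for r in rows
                                      if r.get('kind') == 'ERROR'])
```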
@@ -139,7 +139,7 @@ class BuckAnalyzer:
         utils.merge_json_reports(
             result_files,
             merged_results_path)
-        # TODO: adapt analyze.print_errors to support json and print on screen
+        # TODO: adapt issues.print_errors to support json and print on screen
        print('Results saved in {results_path}'.format(
             results_path=merged_results_path))
         return os.EX_OK

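One possible shape for the TODO above, purely as a sketch: reuse the relocated function on the merged report. The bugs file name here is assumed (the real code would presumably use `utils.BUGS_FILENAME`), and the function names are hypothetical.

```python
import os

from inferlib import issues

def print_merged_errors(merged_results_path, infer_out):
    # Hypothetical follow-up: render the merged JSON report on screen
    # and into a bugs file using the function this commit moved.
    bugs_out = os.path.join(infer_out, 'bugs.txt')  # name assumed
    issues.print_errors(merged_results_path, bugs_out)
```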
@@ -10,6 +10,20 @@ from __future__ import division
 from __future__ import print_function
 from __future__ import unicode_literals
 
+import codecs
+import csv
+import json
+import os
+import shutil
+import sys
+import tempfile
+import xml.etree.ElementTree as ET
+
+from . import config, utils
+
+# Increase the limit of the CSV parser to sys.maxlimit
+csv.field_size_limit(sys.maxsize)
+
 ISSUE_KIND_ERROR = 'ERROR'
 ISSUE_KIND_WARNING = 'WARNING'
@@ -38,3 +52,177 @@ NULL_STYLE_ISSUE_TYPES = [
     'PARAMETER_NOT_NULL_CHECKED',
     'PREMATURE_NIL_TERMINATION_ARGUMENT',
 ]
+
+
+def clean_csv(args, csv_report):
+    collected_rows = []
+    with open(csv_report, 'r') as file_in:
+        reader = csv.reader(file_in)
+        rows = [row for row in reader]
+        if len(rows) <= 1:
+            return rows
+        else:
+            for row in rows[1:]:
+                filename = row[utils.CSV_INDEX_FILENAME]
+                if os.path.isfile(filename):
+                    if args.no_filtering \
+                            or _should_report_csv(args.analyzer, row):
+                        collected_rows.append(row)
+            collected_rows = sorted(
+                collected_rows,
+                cmp=_compare_csv_rows)
+            collected_rows = [rows[0]] + collected_rows
+    temporary_file = tempfile.mktemp()
+    with open(temporary_file, 'w') as file_out:
+        writer = csv.writer(file_out)
+        writer.writerows(collected_rows)
+        file_out.flush()
+        shutil.move(temporary_file, csv_report)
+
+
+def clean_json(args, json_report):
+    collected_rows = []
+    with open(json_report, 'r') as file_in:
+        rows = json.load(file_in)
+        for row in rows:
+            filename = row[utils.JSON_INDEX_FILENAME]
+            if os.path.isfile(filename):
+                if args.no_filtering \
+                        or _should_report_json(args.analyzer, row):
+                    collected_rows.append(row)
+        collected_rows = sorted(
+            collected_rows,
+            cmp=_compare_json_rows)
+    temporary_file = tempfile.mktemp()
+    with open(temporary_file, 'w') as file_out:
+        json.dump(collected_rows, file_out)
+        file_out.flush()
+        shutil.move(temporary_file, json_report)
+
+
+def print_errors(json_report, bugs_out):
+    with codecs.open(json_report, 'r', encoding=utils.LOCALE) as file_in:
+        errors = json.load(file_in)
+        errors = filter(lambda row: row[utils.JSON_INDEX_KIND] in
+                        [ISSUE_KIND_ERROR, ISSUE_KIND_WARNING],
+                        errors)
+    with codecs.open(bugs_out, 'w', encoding=utils.LOCALE) as file_out:
+        text_errors_list = []
+        for row in errors:
+            filename = row[utils.JSON_INDEX_FILENAME]
+            if os.path.isfile(filename):
+                kind = row[utils.JSON_INDEX_KIND]
+                line = row[utils.JSON_INDEX_LINE]
+                error_type = row[utils.JSON_INDEX_TYPE]
+                msg = row[utils.JSON_INDEX_QUALIFIER]
+                indenter = utils.Indenter()
+                indenter.indent_push()
+                indenter.add(
+                    utils.build_source_context(filename,
+                                               utils.TERMINAL_FORMATTER,
+                                               int(line)))
+                source_context = unicode(indenter)
+                text_errors_list.append(
+                    u'{0}:{1}: {2}: {3}\n {4}\n{5}'.format(
+                        filename,
+                        line,
+                        kind.lower(),
+                        error_type,
+                        msg,
+                        source_context,
+                    )
+                )
+        n_issues = len(text_errors_list)
+        if n_issues == 0:
+            _print_and_write(file_out, 'No issues found')
+        else:
+            msg = '\nFound %s\n' % utils.get_plural('issue', n_issues)
+            _print_and_write(file_out, msg)
+            text_errors = '\n\n'.join(text_errors_list)
+            _print_and_write(file_out, text_errors)
+
+
+def _compare_issues(filename_1, line_1, filename_2, line_2):
+    if filename_1 < filename_2:
+        return -1
+    elif filename_1 > filename_2:
+        return 1
+    else:
+        return line_1 - line_2
+
+
+def _compare_csv_rows(row_1, row_2):
+    filename_1 = row_1[utils.CSV_INDEX_FILENAME]
+    filename_2 = row_2[utils.CSV_INDEX_FILENAME]
+    line_1 = int(row_1[utils.CSV_INDEX_LINE])
+    line_2 = int(row_2[utils.CSV_INDEX_LINE])
+    return _compare_issues(filename_1, line_1, filename_2, line_2)
+
+
+def _compare_json_rows(row_1, row_2):
+    filename_1 = row_1[utils.JSON_INDEX_FILENAME]
+    filename_2 = row_2[utils.JSON_INDEX_FILENAME]
+    line_1 = row_1[utils.JSON_INDEX_LINE]
+    line_2 = row_2[utils.JSON_INDEX_LINE]
+    return _compare_issues(filename_1, line_1, filename_2, line_2)
+
+
+def _should_report(analyzer, error_kind, error_type, error_bucket):
+    analyzers_whitelist = [
+        config.ANALYZER_ERADICATE,
+        config.ANALYZER_CHECKERS,
+        config.ANALYZER_TRACING,
+    ]
+    error_kinds = [ISSUE_KIND_ERROR, ISSUE_KIND_WARNING]
+    null_style_buckets = ['B1', 'B2']
+
+    if analyzer in analyzers_whitelist:
+        return True
+
+    if error_kind not in error_kinds:
+        return False
+
+    if not error_type:
+        return False
+
+    if error_type in NULL_STYLE_ISSUE_TYPES:
+        return error_bucket in null_style_buckets
+
+    return error_type in ISSUE_TYPES
+
+
+def _should_report_csv(analyzer, row):
+    error_kind = row[utils.CSV_INDEX_KIND]
+    error_type = row[utils.CSV_INDEX_TYPE]
+    error_bucket = ''  # can be updated later once we extract it from qualifier
+
+    try:
+        qualifier_xml = ET.fromstring(row[utils.CSV_INDEX_QUALIFIER_TAGS])
+        if qualifier_xml.tag == utils.QUALIFIER_TAGS:
+            bucket = qualifier_xml.find(utils.BUCKET_TAGS)
+            if bucket is not None:
+                error_bucket = bucket.text
+    except ET.ParseError:
+        pass  # this will skip any invalid xmls
+
+    return _should_report(analyzer, error_kind, error_type, error_bucket)
+
+
+def _should_report_json(analyzer, row):
+    error_kind = row[utils.JSON_INDEX_KIND]
+    error_type = row[utils.JSON_INDEX_TYPE]
+    error_bucket = ''  # can be updated later once we extract it from qualifier
+
+    for qual_tag in row[utils.QUALIFIER_TAGS]:
+        if qual_tag['tag'] == utils.BUCKET_TAGS:
+            error_bucket = qual_tag['value']
+            break
+
+    return _should_report(analyzer, error_kind, error_type, error_bucket)
+
+
+def _print_and_write(file_out, message):
+    print(message)
+    file_out.write(message + '\n')

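One portability remark on the new module: `sorted(..., cmp=...)` and `unicode` exist only in Python 2, which is what these scripts target. Under Python 3 the same (filename, line) ordering could be expressed without a two-argument comparator; a sketch reusing the names defined in the diff above:

```python
import functools

# Direct translation: wrap the two-argument comparator.
rows = sorted(rows, key=functools.cmp_to_key(_compare_json_rows))

# Equivalent key-based form: order by file name, then line number.
rows = sorted(rows, key=lambda row: (row[utils.JSON_INDEX_FILENAME],
                                     int(row[utils.JSON_INDEX_LINE])))
```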