generate json natively

Summary: public
Use InferPrint to generate infer-out/report.json directly instead of converting
infer-out/report.csv. The JSON report may soon contain more information than the CSV one.

Reviewed By: jeremydubreil

Differential Revision: D2603775

fb-gh-sync-id: f141dfe
master
Jules Villard 9 years ago committed by facebook-github-bot-1
parent db35afef1e
commit d03dcb6961

@@ -1,12 +1,12 @@
[
{
"type": "NULL_DEREFERENCE",
"procedure": "void MainActivity.onCreate(Bundle)",
"file": "app/src/main/java/infer/inferandroidexample/MainActivity.java"
"file": "app/src/main/java/infer/inferandroidexample/MainActivity.java",
"bug_type": "NULL_DEREFERENCE"
},
{
"type": "RESOURCE_LEAK",
"procedure": "void MainActivity.writeToFile()",
"file": "app/src/main/java/infer/inferandroidexample/MainActivity.java"
"file": "app/src/main/java/infer/inferandroidexample/MainActivity.java",
"bug_type": "RESOURCE_LEAK"
}
]
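
Note: the expected output above reflects the key InferPrint now emits natively, with each issue carrying its kind under 'bug_type'. A minimal sketch of consuming the generated file, assuming the default infer-out/report.json location from the summary:

    import json

    # read the natively generated report (path taken from the commit summary)
    with open('infer-out/report.json') as report_file:
        issues = json.load(report_file)

    for issue in issues:
        # 'bug_type', 'procedure' and 'file' are the keys shown in the expected output above
        print('%s in %s (%s)' % (issue['bug_type'], issue['file'], issue['procedure']))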

@@ -40,7 +40,10 @@ BUCK_CONFIG_BACKUP = '.buckconfig.local.backup_generated_by_infer'
DEFAULT_BUCK_OUT = os.path.join(os.getcwd(), 'buck-out')
DEFAULT_BUCK_OUT_GEN = os.path.join(DEFAULT_BUCK_OUT, 'gen')
INFER_REPORT = os.path.join(utils.BUCK_INFER_OUT, utils.CSV_REPORT_FILENAME)
INFER_CSV_REPORT = os.path.join(utils.BUCK_INFER_OUT,
utils.CSV_REPORT_FILENAME)
INFER_JSON_REPORT = os.path.join(utils.BUCK_INFER_OUT,
utils.JSON_REPORT_FILENAME)
INFER_STATS = os.path.join(utils.BUCK_INFER_OUT, utils.STATS_FILENAME)
INFER_SCRIPT = """\
@@ -332,24 +335,19 @@ def load_stats(opened_jar):
raise NotFoundInJar
def load_report(opened_jar):
def load_csv_report(opened_jar):
try:
sio = io.StringIO(opened_jar.read(INFER_REPORT).decode())
sio = io.StringIO(opened_jar.read(INFER_CSV_REPORT).decode())
return list(csv.reader(sio))
except KeyError as e:
raise NotFoundInJar
def rows_remove_duplicates(rows):
seen = {}
result = []
for row in rows:
t = tuple(row)
if t in seen:
continue
seen[t] = 1
result.append(row)
return result
def load_json_report(opened_jar):
try:
return json.loads(opened_jar.read(INFER_JSON_REPORT).decode())
except KeyError as e:
raise NotFoundInJar
def collect_results(args, start_time):
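
Note: load_json_report mirrors load_csv_report, pulling the report out of a target's jar and handing back parsed rows rather than CSV lines. A standalone sketch of the lookup, assuming the jar is opened with zipfile as the surrounding code suggests, and using an assumed value for the archive member path:

    import json
    import zipfile

    INFER_JSON_REPORT = 'infer_out/report.json'   # assumed member path; the real value
                                                   # is built from utils.BUCK_INFER_OUT

    class NotFoundInJar(Exception):
        pass

    def load_json_report(opened_jar):
        try:
            # ZipFile.read raises KeyError when the archive has no such member
            return json.loads(opened_jar.read(INFER_JSON_REPORT).decode())
        except KeyError:
            raise NotFoundInJar

    with zipfile.ZipFile('buck-out/gen/example.jar') as jar:   # hypothetical jar path
        try:
            json_rows = load_json_report(jar)
        except NotFoundInJar:
            json_rows = []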
@@ -361,7 +359,8 @@ def collect_results(args, start_time):
with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
f.write(buck_stats)
all_rows = []
all_csv_rows = set()
all_json_rows = set()
headers = []
stats = init_stats(args, start_time)
@@ -398,10 +397,15 @@ def collect_results(args, start_time):
old_value = stats[type_k].get(key, 0)
stats[type_k][key] = old_value + value
rows = load_report(jar)
if len(rows) > 0:
headers.append(rows[0])
all_rows.extend(rows[1:])
csv_rows = load_csv_report(jar)
if len(csv_rows) > 0:
headers.append(csv_rows[0])
for row in csv_rows[1:]:
all_csv_rows.add(tuple(row))
json_rows = load_json_report(jar)
for row in json_rows:
all_json_rows.add(json.dumps(row))
# Override normals
stats['normal'].update(target_stats.get('normal', {}))
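
Note: both accumulators are now sets, so duplicate findings coming from several Buck targets collapse automatically: CSV rows become tuples, and JSON rows are made hashable by serializing them with json.dumps. A tiny sketch of that round trip with made-up rows:

    import json

    all_json_rows = set()
    for row in [{'bug_type': 'NULL_DEREFERENCE', 'line': 17},
                {'bug_type': 'NULL_DEREFERENCE', 'line': 17},   # same issue seen in another target
                {'bug_type': 'RESOURCE_LEAK', 'line': 42}]:
        all_json_rows.add(json.dumps(row))   # dicts are unhashable; their JSON strings are not

    assert len(all_json_rows) == 2
    unique_rows = [json.loads(row) for row in all_json_rows]   # back to dicts when needed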
@@ -411,6 +415,7 @@ def collect_results(args, start_time):
logging.warn('Bad zip file %s', path)
csv_report = os.path.join(args.infer_out, utils.CSV_REPORT_FILENAME)
json_report = os.path.join(args.infer_out, utils.JSON_REPORT_FILENAME)
bugs_out = os.path.join(args.infer_out, utils.BUGS_FILENAME)
if len(headers) == 0:
@@ -431,11 +436,16 @@ def collect_results(args, start_time):
with open(csv_report, 'w') as report:
writer = csv.writer(report)
writer.writerows([headers[0]] + rows_remove_duplicates(all_rows))
all_csv_rows = [list(row) for row in all_csv_rows]
writer.writerows([headers[0]] + all_csv_rows)
report.flush()
# export the CSV rows to JSON
utils.create_json_report(args.infer_out)
with open(json_report, 'w') as report:
json_string = '['
json_string += ','.join(all_json_rows)
json_string += ']'
report.write(json_string)
report.flush()
print('\n')
infer.print_errors(csv_report, bugs_out)
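
Note: the merged report.json is assembled by hand from rows that are already JSON strings; joining them with commas inside brackets yields the same document json.dump would produce for the parsed list. A quick sanity sketch with a hypothetical row:

    import json

    all_json_rows = {json.dumps({'bug_type': 'NULL_DEREFERENCE', 'line': 17})}
    json_string = '[' + ','.join(all_json_rows) + ']'   # same assembly as above

    assert json.loads(json_string) == [{'bug_type': 'NULL_DEREFERENCE', 'line': 17}]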

@@ -136,8 +136,7 @@ class Tracer(object):
self.build_node(node)
def build_report(self, report):
traces = json.loads(report[utils.JSON_INDEX_TRACE])
self.build_trace(traces[utils.JSON_INDEX_TRACE])
self.build_trace(report[utils.JSON_INDEX_TRACE])
def __str__(self):
return str(self.indenter)
@@ -148,8 +147,7 @@ class Selector(object):
self.args = args
def has_trace(report):
trace = json.loads(report[utils.JSON_INDEX_TRACE])
return len(trace[utils.JSON_INDEX_TRACE]) > 0
return len(report[utils.JSON_INDEX_TRACE]) > 0
self.reports = [report for report in reports if has_trace(report)]
def show_choices(self):
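
Note: because report.json is parsed as a whole, each report dict already holds its trace as a list of step dicts under 'bug_trace', so Tracer.build_report and has_trace index it directly instead of json.loads-ing a nested string. For example, with a hypothetical parsed report:

    # hypothetical parsed report: the trace arrives as a Python list, not a JSON string
    report = {'bug_type': 'NULL_DEREFERENCE',
              'bug_trace': [{'filename': 'MainActivity.java',
                             'line_number': 17,
                             'level': 0}]}

    def has_trace(report):
        return len(report['bug_trace']) > 0   # no json.loads needed any more

    assert has_trace(report)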

@@ -242,7 +242,7 @@ def help_exit(message):
exit(1)
def compare_rows(row_1, row_2):
def compare_csv_rows(row_1, row_2):
filename_1 = row_1[utils.CSV_INDEX_FILENAME]
filename_2 = row_2[utils.CSV_INDEX_FILENAME]
if filename_1 < filename_2:
@@ -255,20 +255,20 @@ def compare_rows(row_1, row_2):
return line_1 - line_2
def should_report(analyzer, row):
error_kind = row[utils.CSV_INDEX_KIND]
error_type = row[utils.CSV_INDEX_TYPE]
error_bucket = '' # can be updated later once we extract it from qualifier
def compare_json_rows(row_1, row_2):
filename_1 = row_1[utils.JSON_INDEX_FILENAME]
filename_2 = row_2[utils.JSON_INDEX_FILENAME]
if filename_1 < filename_2:
return -1
elif filename_1 > filename_2:
return 1
else:
line_1 = int(row_1[utils.JSON_INDEX_LINE])
line_2 = int(row_2[utils.JSON_INDEX_LINE])
return line_1 - line_2
try:
qualifier_xml = ET.fromstring(row[utils.CSV_INDEX_QUALIFIER_TAGS])
if qualifier_xml.tag == utils.QUALIFIER_TAGS:
bucket = qualifier_xml.find(utils.BUCKET_TAGS)
if bucket is not None:
error_bucket = bucket.text
except ET.ParseError:
pass # this will skip any invalid xmls
def should_report(analyzer, error_kind, error_type, error_bucket):
# config what to print is listed below
error_kinds = [ERROR, WARNING]
@@ -314,6 +314,55 @@ def should_report(analyzer, row):
return False
def should_report_csv(analyzer, row):
error_kind = row[utils.CSV_INDEX_KIND]
error_type = row[utils.CSV_INDEX_TYPE]
error_bucket = '' # can be updated later once we extract it from qualifier
try:
qualifier_xml = ET.fromstring(row[utils.CSV_INDEX_QUALIFIER_TAGS])
if qualifier_xml.tag == utils.QUALIFIER_TAGS:
bucket = qualifier_xml.find(utils.BUCKET_TAGS)
if bucket is not None:
error_bucket = bucket.text
except ET.ParseError:
pass # this will skip any invalid xmls
return should_report(analyzer, error_kind, error_type, error_bucket)
def should_report_json(analyzer, row):
error_kind = row[utils.JSON_INDEX_KIND]
error_type = row[utils.JSON_INDEX_TYPE]
error_bucket = '' # can be updated later once we extract it from qualifier
for qual_tag in row[utils.QUALIFIER_TAGS]:
if qual_tag['tag'] == utils.BUCKET_TAGS:
error_bucket = qual_tag['value']
break
return should_report(analyzer, error_kind, error_type, error_bucket)
def clean_json(args, json_report):
collected_rows = []
with open(json_report, 'r') as file_in:
rows = json.load(file_in)
for row in rows:
filename = row[utils.JSON_INDEX_FILENAME]
if os.path.isfile(filename):
if args.no_filtering or should_report_json(args.analyzer, row):
collected_rows.append(row)
collected_rows = sorted(
collected_rows,
cmp=compare_json_rows)
temporary_file = tempfile.mktemp()
with open(temporary_file, 'w') as file_out:
json.dump(collected_rows, file_out)
file_out.flush()
shutil.move(temporary_file, json_report)
def clean_csv(args, csv_report):
collected_rows = []
with open(csv_report, 'r') as file_in:
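
Note: in the JSON report the qualifier tags arrive as a list of {'tag': ..., 'value': ...} dicts rather than an XML blob, so should_report_json finds the error bucket with a plain scan instead of ElementTree parsing. A sketch with a made-up row; the concrete tag names are assumptions about utils.QUALIFIER_TAGS and utils.BUCKET_TAGS:

    # made-up row; 'qualifier_tags' and 'bucket' are assumed constant values
    QUALIFIER_TAGS = 'qualifier_tags'
    BUCKET_TAGS = 'bucket'

    row = {'bug_type': 'NULL_DEREFERENCE',
           QUALIFIER_TAGS: [{'tag': 'call_procedure', 'value': 'onCreate'},
                            {'tag': BUCKET_TAGS, 'value': 'B1'}]}

    error_bucket = ''
    for qual_tag in row[QUALIFIER_TAGS]:
        if qual_tag['tag'] == BUCKET_TAGS:
            error_bucket = qual_tag['value']
            break

    assert error_bucket == 'B1'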
@@ -325,11 +374,12 @@ def clean_csv(args, csv_report):
for row in rows[1:]:
filename = row[utils.CSV_INDEX_FILENAME]
if os.path.isfile(filename):
if args.no_filtering or should_report(args.analyzer, row):
if args.no_filtering \
or should_report_csv(args.analyzer, row):
collected_rows.append(row)
collected_rows = sorted(
collected_rows,
cmp=compare_rows)
cmp=compare_csv_rows)
collected_rows = [rows[0]] + collected_rows
temporary_file = tempfile.mktemp()
with open(temporary_file, 'w') as file_out:
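
Note: clean_json sorts the kept rows with the comparator above, ordering issues by filename and then by line. A self-contained sketch of that ordering, using assumed key names 'file' and 'line' and functools.cmp_to_key so it also runs outside this Python 2 script:

    import functools

    # made-up rows; 'file' and 'line' are assumed values of the JSON_INDEX_* constants
    rows = [{'file': 'B.java', 'line': 3},
            {'file': 'A.java', 'line': 9},
            {'file': 'A.java', 'line': 2}]

    def compare_json_rows(row_1, row_2):
        if row_1['file'] != row_2['file']:
            return -1 if row_1['file'] < row_2['file'] else 1
        return int(row_1['line']) - int(row_2['line'])

    rows = sorted(rows, key=functools.cmp_to_key(compare_json_rows))
    # -> A.java:2, A.java:9, B.java:3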
@@ -632,10 +682,10 @@ class Infer:
"""Report statistics about the computation and create a CSV file
containing the list or errors found during the analysis"""
csv_report = os.path.join(self.args.infer_out,
utils.CSV_REPORT_FILENAME)
bugs_out = os.path.join(self.args.infer_out,
utils.BUGS_FILENAME)
out_dir = self.args.infer_out
csv_report = os.path.join(out_dir, utils.CSV_REPORT_FILENAME)
json_report = os.path.join(out_dir, utils.JSON_REPORT_FILENAME)
bugs_out = os.path.join(out_dir, utils.BUGS_FILENAME)
procs_report = os.path.join(self.args.infer_out, 'procs.csv')
infer_print_cmd = [utils.get_cmd_in_bin_dir('InferPrint')]
@@ -643,6 +693,7 @@ class Infer:
'-q',
'-results_dir', self.args.infer_out,
'-bugs', csv_report,
'-bugs_json', json_report,
'-procs', procs_report,
'-analyzer', self.args.analyzer
]
@@ -657,8 +708,8 @@ class Infer:
+ infer_print_cmd)
else:
clean_csv(self.args, csv_report)
clean_json(self.args, json_report)
self.update_stats_with_warnings(csv_report)
utils.create_json_report(self.args.infer_out)
print('\n')
if not self.args.buck:
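
Note: the report step now also passes -bugs_json so InferPrint writes report.json next to report.csv, and both files then go through their respective cleaners. Roughly the command this builds, assuming the default infer-out directory and 'infer' as the analyzer name:

    # approximate command assembled above (flags from the code; concrete values assumed)
    infer_print_cmd = ['InferPrint',          # resolved via utils.get_cmd_in_bin_dir
                       '-q',
                       '-results_dir', 'infer-out',
                       '-bugs', 'infer-out/report.csv',
                       '-bugs_json', 'infer-out/report.json',
                       '-procs', 'infer-out/procs.csv',
                       '-analyzer', 'infer']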

@@ -82,8 +82,8 @@ JSON_INDEX_PROCEDURE = 'procedure'
JSON_INDEX_QUALIFIER = 'qualifier'
JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
JSON_INDEX_SEVERITY = 'file'
JSON_INDEX_TYPE = 'type'
JSON_INDEX_TRACE = 'trace'
JSON_INDEX_TYPE = 'bug_type'
JSON_INDEX_TRACE = 'bug_trace'
JSON_INDEX_TRACE_LEVEL = 'level'
JSON_INDEX_TRACE_FILENAME = 'filename'
JSON_INDEX_TRACE_LINE = 'line_number'
@@ -391,32 +391,6 @@ def invoke_function_with_callbacks(
raise
def save_as_json(data, filename):
with open(filename, 'w') as file_out:
json.dump(data, file_out, indent=2)
def merge_json_reports(report_paths, merged_report_path):
# TODO: use streams instead of loading the entire json in memory
json_data = []
for json_path in report_paths:
with open(json_path, 'r') as fd:
json_data = json_data + json.loads(fd.read())
save_as_json(json_data, merged_report_path)
def create_json_report(out_dir):
csv_report_filename = os.path.join(out_dir, CSV_REPORT_FILENAME)
json_report_filename = os.path.join(out_dir, JSON_REPORT_FILENAME)
rows = []
with open(csv_report_filename, 'r') as file_in:
reader = csv.reader(file_in)
rows = [row for row in reader]
headers = rows[0]
issues = [dict(zip(headers, row)) for row in rows[1:]]
save_as_json(issues, json_report_filename)
def get_plural(_str, count):
plural_str = _str if count == 1 else _str + 's'
return '%d %s' % (count, plural_str)
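
Note: since InferPrint now emits report.json itself, the post-hoc conversion above (create_json_report, which just zipped the CSV header with each row, and merge_json_reports) is deleted. This is the difference the summary hints at: the converted report could only hold flat CSV cells, whereas the native one can nest structured data such as the bug_trace list. Illustrative only:

    # what the removed create_json_report produced: every field a flat string
    headers = ['bug_type', 'file', 'line']                      # hypothetical CSV header
    row = ['NULL_DEREFERENCE', 'MainActivity.java', '17']
    flat_issue = dict(zip(headers, row))                        # {'line': '17', ...}

    # what the natively generated report can carry instead, e.g. a structured trace
    native_issue = {'bug_type': 'NULL_DEREFERENCE',
                    'file': 'MainActivity.java',
                    'bug_trace': [{'filename': 'MainActivity.java',
                                   'line_number': 17,
                                   'level': 0}]}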
