[infer][python] no longer generate the CSV report during the analysis

Reviewed By: jberdine

Differential Revision: D4287279

fbshipit-source-id: 670012c
Branch: master
Author: Jeremy Dubreil, committed by Facebook Github Bot
Parent: 6051a3e34c
Commit: aa362a27ed

@@ -12,6 +12,7 @@ from __future__ import unicode_literals

 import argparse
 import csv
+import json
 import logging
 import multiprocessing
 import os
@@ -98,20 +99,21 @@ infer_group.add_argument('-pr', '--project-root',
                          help='Location of the project root '
                               '(default is current directory)')
 infer_group.add_argument('-j', '--multicore', metavar='n', type=int,
                          default=multiprocessing.cpu_count(),
                          dest='multicore', help='Set the number of cores to '
                          'be used for the analysis (default uses all cores)')
 infer_group.add_argument('-l', '--load-average', metavar='<float>', type=float,
                          help='Specifies that no new jobs (commands) should '
                          'be started if there are other jobs running and the '
                          'load average is at least <float>.')
 infer_group.add_argument('--buck', action='store_true', dest='buck',
                          help='To use when run with buck')
 infer_group.add_argument('--java-jar-compiler',
                          metavar='<file>')


 def remove_infer_out(infer_out):
     # it is safe to ignore errors here because recreating the infer_out
     # directory will fail later
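
Note: the '-j'/'--multicore' flag above follows the standard argparse pattern of defaulting to the machine's core count. A minimal standalone sketch (the parser and the sample value are illustrative, not part of this diff):

    import argparse
    import multiprocessing

    parser = argparse.ArgumentParser()
    parser.add_argument('-j', '--multicore', metavar='n', type=int,
                        default=multiprocessing.cpu_count(), dest='multicore')

    args = parser.parse_args(['-j', '4'])
    print(args.multicore)  # 4; without '-j' this falls back to cpu_count()
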
@@ -270,12 +272,11 @@ class AnalyzerWrapper(object):

         return exit_status

-    def update_stats_with_warnings(self, csv_report):
-        with open(csv_report, 'r') as file_in:
-            reader = utils.locale_csv_reader(file_in)
-            rows = [row for row in reader][1:]
-            for row in rows:
-                key = row[issues.CSV_INDEX_TYPE]
+    def update_stats_with_warnings(self, json_report):
+        with open(json_report, 'r') as file_in:
+            entries = json.load(file_in)
+            for entry in entries:
+                key = entry[issues.JSON_INDEX_TYPE]
                 previous_value = self.stats['int'].get(key, 0)
                 self.stats['int'][key] = previous_value + 1
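
For reference, the new stats path reduces to the following standalone sketch. The file path and the 'bug_type' key are assumptions standing in for config.JSON_REPORT_FILENAME and issues.JSON_INDEX_TYPE:

    import json

    def count_issue_types(json_report_path):
        # report.json is assumed to be a JSON array of issue objects
        with open(json_report_path, 'r') as file_in:
            entries = json.load(file_in)
        counts = {}
        for entry in entries:
            key = entry['bug_type']  # assumed value of JSON_INDEX_TYPE
            counts[key] = counts.get(key, 0) + 1
        return counts
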
@@ -284,7 +285,6 @@ class AnalyzerWrapper(object):
            containing the list of errors found during the analysis"""
         out_dir = self.args.infer_out
-        csv_report = os.path.join(out_dir, config.CSV_REPORT_FILENAME)
         json_report = os.path.join(out_dir, config.JSON_REPORT_FILENAME)

         procs_report = os.path.join(self.args.infer_out, 'procs.csv')
@@ -292,7 +292,6 @@ class AnalyzerWrapper(object):
         infer_print_options = [
             '-q',
             '-results_dir', self.args.infer_out,
-            '-bugs', csv_report,
             '-bugs_json', json_report,
             '-procs', procs_report,
             '-analyzer', self.args.analyzer
@@ -307,7 +306,7 @@ class AnalyzerWrapper(object):
                 'Error with InferPrint with the command: {}'.format(
                     infer_print_cmd))
         else:
-            self.update_stats_with_warnings(csv_report)
+            self.update_stats_with_warnings(json_report)

         return exit_status

@@ -14,7 +14,6 @@ from __future__ import unicode_literals

 import argparse
 import csv
-import io
 import json
 import logging
 import multiprocessing
@@ -38,8 +37,6 @@ ANALYSIS_SUMMARY_OUTPUT = 'analysis_summary.txt'
 DEFAULT_BUCK_OUT = os.path.join(utils.decode(os.getcwd()), 'buck-out')
 DEFAULT_BUCK_OUT_GEN = os.path.join(DEFAULT_BUCK_OUT, 'gen')

-INFER_CSV_REPORT = os.path.join(config.BUCK_INFER_OUT,
-                                config.CSV_REPORT_FILENAME)
 INFER_JSON_REPORT = os.path.join(config.BUCK_INFER_OUT,
                                  config.JSON_REPORT_FILENAME)
 INFER_STATS = os.path.join(config.BUCK_INFER_OUT, config.STATS_FILENAME)
@@ -136,7 +133,8 @@ def get_normalized_targets(targets):
             subprocess.check_output(buck_cmd).decode().strip().split('\n'))
         return targets
     except subprocess.CalledProcessError as e:
-        logging.error('Error while expanding targets with {0}'.format(buck_cmd))
+        logging.error('Error while expanding targets with {0}'.format(
+            buck_cmd))
         raise e
@@ -270,14 +268,6 @@ def load_stats(opened_jar):
         raise NotFoundInJar


-def load_csv_report(opened_jar):
-    try:
-        sio = io.StringIO(opened_jar.read(INFER_CSV_REPORT).decode())
-        return list(utils.locale_csv_reader(sio))
-    except KeyError:
-        raise NotFoundInJar
-
-
 def load_json_report(opened_jar):
     try:
         return json.loads(opened_jar.read(INFER_JSON_REPORT).decode())
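
A hedged sketch of what load_json_report does against an open jar: read the member, decode, parse. The member path here is illustrative, since the real one is built from config.BUCK_INFER_OUT:

    import json
    import zipfile

    def read_report_from_jar(jar_path, member='infer_out/report.json'):
        with zipfile.ZipFile(jar_path) as jar:
            try:
                # zipfile raises KeyError for a missing member, which is
                # what the NotFoundInJar exception wraps above
                return json.loads(jar.read(member).decode())
            except KeyError:
                return []
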
@@ -304,9 +294,7 @@ def collect_results(args, start_time, targets):
     with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
         f.write(buck_stats)

-    all_csv_rows = set()
     all_json_rows = set()
-    headers = []
     stats = init_stats(args, start_time)

     accumulation_whitelist = list(map(re.compile, [
@@ -340,12 +328,6 @@ def collect_results(args, start_time, targets):
                         old_value = stats[type_k].get(key, 0)
                         stats[type_k][key] = old_value + value

-                csv_rows = load_csv_report(jar)
-                if len(csv_rows) > 0:
-                    headers.append(csv_rows[0])
-                    for row in csv_rows[1:]:
-                        all_csv_rows.add(tuple(row))
-
                 json_rows = load_json_report(jar)
                 for row in json_rows:
                     all_json_rows.add(json.dumps(row))
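
The json.dumps call is what makes set-based deduplication possible: dicts are unhashable, so each row is serialized to a string before insertion (the removed code did the same with tuple(row) for CSV rows). A small sketch with invented sample rows:

    import json

    all_json_rows = set()
    for row in [{'bug_type': 'NULL_DEREFERENCE', 'file': 'A.java'},
                {'bug_type': 'NULL_DEREFERENCE', 'file': 'A.java'}]:
        # identical rows serialize identically, so duplicates reported by
        # several jars collapse; sort_keys=True would additionally guard
        # against key-order differences
        all_json_rows.add(json.dumps(row))

    assert len(all_json_rows) == 1
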
@@ -357,13 +339,8 @@ def collect_results(args, start_time, targets):
         except zipfile.BadZipfile:
             logging.warn('Bad zip file %s', path)

-    csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME)
     json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)

-    if len(headers) > 1:
-        if any(map(lambda x: x != headers[0], headers)):
-            raise Exception('Inconsistent reports found')
-
     # Convert all float values to integer values
     for key, value in stats.get('float', {}).items():
         stats['int'][key] = int(round(value))
@@ -371,13 +348,6 @@ def collect_results(args, start_time, targets):
     # Delete the float entries before exporting the results
     del(stats['float'])

-    with open(csv_report, 'w') as report:
-        if len(headers) > 0:
-            writer = csv.writer(report)
-            all_csv_rows = [list(row) for row in all_csv_rows]
-            writer.writerows([headers[0]] + all_csv_rows)
-            report.flush()
-
     with open(json_report, 'w') as report:
         json_string = '['
         json_string += ','.join(all_json_rows)
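
Because the set already holds JSON-encoded strings, the merged report can be assembled by joining them inside brackets instead of re-serializing; a minimal sketch of that write path:

    import json

    all_json_rows = {json.dumps({'bug_type': 'NULL_DEREFERENCE'})}

    with open('report.json', 'w') as report:
        # comma-joined objects wrapped in brackets form a JSON array
        report.write('[' + ','.join(all_json_rows) + ']')
        report.flush()
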

@@ -45,7 +45,6 @@ CSV_PERF_FILENAME = 'performances.csv'
 STATS_FILENAME = 'stats.json'
 PROC_STATS_FILENAME = 'proc_stats.json'
-CSV_REPORT_FILENAME = 'report.csv'
 JSON_REPORT_FILENAME = 'report.json'
 INFER_BUCK_DEPS_FILENAME = 'infer-deps.txt'
 BUGS_FILENAME = 'bugs.txt'

@@ -31,9 +31,6 @@ ISSUE_KIND_WARNING = 'WARNING'
 ISSUE_KIND_INFO = 'INFO'
 ISSUE_KIND_ADVICE = 'ADVICE'

-# indices in rows of csv reports
-CSV_INDEX_TYPE = 2
-
 # field names in rows of json reports
 JSON_INDEX_DOTTY = 'dotty'
 JSON_INDEX_FILENAME = 'file'
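
The deleted CSV_INDEX_TYPE captures the shift this commit makes: issue attributes are now looked up by field name rather than by column position. A toy comparison (the column layout and the 'bug_type' key are assumptions, not taken from this diff):

    CSV_INDEX_TYPE = 2            # old: issue type sat in column 2
    JSON_INDEX_TYPE = 'bug_type'  # assumed value of the JSON field name

    csv_row = ['-', '-', 'NULL_DEREFERENCE']     # placeholder columns
    json_row = {'bug_type': 'NULL_DEREFERENCE'}  # self-describing record

    assert csv_row[CSV_INDEX_TYPE] == json_row[JSON_INDEX_TYPE]
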
