[infer][java] When using Buck to analyze Java projects, only report the errors found on the targets passed on the command line

Summary:
Before this diff, Infer was simply going through the list of jar files found in `buck-out` and was loading all the `report.json` files found in those jar files in order to merge them into a final report. The main drawback of this was that removing `buck-out` was mandatory to get accurate results when switching between targets to analyze.

With this diff, we now use the `buck audit classpath` option to get, from a list of targets, the list of jar files to load the `report.json` files from. This makes it easier to use Infer from the command line when switching branches in the repositories or when switching between targets to analyze.

Reviewed By: martinoluca

Differential Revision: D3922548

fbshipit-source-id: ec550fa
master
Jeremy Dubreil 8 years ago committed by Facebook Github Bot 9
parent befab1007b
commit fc28683ea2

@ -273,7 +273,16 @@ def load_json_report(opened_jar):
raise NotFoundInJar raise NotFoundInJar
def get_output_jars(targets):
    """Return the classpath jar paths Buck produces for the given targets.

    Runs `buck audit classpath <targets>` and returns one jar path per
    output line. Returns an empty list when no targets are given, and —
    unlike the previous ``split('\n')`` — also when Buck prints nothing
    (``''.split('\n')`` would yield ``['']``, a bogus empty path).
    """
    if not targets:
        return []
    audit_output = subprocess.check_output(
        ['buck', 'audit', 'classpath'] + targets)
    # splitlines() drops the trailing newline cleanly and yields [] for
    # empty output, so callers never iterate over a phantom '' entry.
    return audit_output.strip().splitlines()
def collect_results(args, start_time, targets):
"""Walks through buck-gen, collects results for the different buck targets """Walks through buck-gen, collects results for the different buck targets
and stores them in in args.infer_out/results.csv. and stores them in in args.infer_out/results.csv.
""" """
@ -297,45 +306,43 @@ def collect_results(args, start_time):
expected_analyzer = stats['normal']['analyzer'] expected_analyzer = stats['normal']['analyzer']
expected_version = stats['normal']['infer_version'] expected_version = stats['normal']['infer_version']
for root, _, files in os.walk(DEFAULT_BUCK_OUT_GEN): for path in get_output_jars(targets):
for f in [f for f in files if f.endswith('.jar')]: try:
path = os.path.join(root, f) with zipfile.ZipFile(path) as jar:
try: # Accumulate integers and float values
with zipfile.ZipFile(path) as jar: target_stats = load_stats(jar)
# Accumulate integers and float values
target_stats = load_stats(jar) found_analyzer = target_stats['normal']['analyzer']
found_version = target_stats['normal']['infer_version']
found_analyzer = target_stats['normal']['analyzer']
found_version = target_stats['normal']['infer_version'] if found_analyzer != expected_analyzer \
or found_version != expected_version:
if (found_analyzer != expected_analyzer continue
or found_version != expected_version): else:
continue for type_k in ['int', 'float']:
else: items = target_stats.get(type_k, {}).items()
for type_k in ['int', 'float']: for key, value in items:
items = target_stats.get(type_k, {}).items() if not any(map(lambda r: r.match(key),
for key, value in items:
if not any(map(lambda r: r.match(key),
accumulation_whitelist)): accumulation_whitelist)):
old_value = stats[type_k].get(key, 0) old_value = stats[type_k].get(key, 0)
stats[type_k][key] = old_value + value stats[type_k][key] = old_value + value
csv_rows = load_csv_report(jar) csv_rows = load_csv_report(jar)
if len(csv_rows) > 0: if len(csv_rows) > 0:
headers.append(csv_rows[0]) headers.append(csv_rows[0])
for row in csv_rows[1:]: for row in csv_rows[1:]:
all_csv_rows.add(tuple(row)) all_csv_rows.add(tuple(row))
json_rows = load_json_report(jar) json_rows = load_json_report(jar)
for row in json_rows: for row in json_rows:
all_json_rows.add(json.dumps(row)) all_json_rows.add(json.dumps(row))
# Override normals # Override normals
stats['normal'].update(target_stats.get('normal', {})) stats['normal'].update(target_stats.get('normal', {}))
except NotFoundInJar: except NotFoundInJar:
pass pass
except zipfile.BadZipfile: except zipfile.BadZipfile:
logging.warn('Bad zip file %s', path) logging.warn('Bad zip file %s', path)
csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME) csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME)
json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME) json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)
@ -459,7 +466,7 @@ class Wrapper:
def _collect_results(self, start_time): def _collect_results(self, start_time):
self.timer.start('Collecting results ...') self.timer.start('Collecting results ...')
collect_results(self.infer_args, start_time) collect_results(self.infer_args, start_time, self.normalized_targets)
self.timer.stop('Done') self.timer.stop('Done')
def run(self): def run(self):
@ -484,7 +491,7 @@ class Wrapper:
buck_cmd = self.buck_cmd + javac_config buck_cmd = self.buck_cmd + javac_config
subprocess.check_call(buck_cmd) subprocess.check_call(buck_cmd)
self.timer.stop('Buck finished') self.timer.stop('Buck finished')
self._collect_results(start_time) self._collect_results(start_time)
return os.EX_OK return os.EX_OK
except KeyboardInterrupt as e: except KeyboardInterrupt as e:
self.timer.stop('Exiting') self.timer.stop('Exiting')

Loading…
Cancel
Save