From fc28683ea21558263c5d7bef8e35e9c30931c6e0 Mon Sep 17 00:00:00 2001 From: Jeremy Dubreil Date: Wed, 28 Sep 2016 09:45:15 -0700 Subject: [PATCH] [infer][java] When using Buck to analyze Java projects, only report the errors found on the targets passed on the command line Summary: Before this diff, Infer was simply going through the list of jar files found in `buck-out` and was loading all the `report.json` files found in those jar files in order to merge them into a final report. The main drawback of this was that removing `buck-out` was mandatory to get accurate results when switching between targets to analyze. With this diff, we now use the `buck audit classpath` option to get from a list of targets, the list of jar files to load the `report.json` files from. This makes it easier to use Infer from the command line when switching branches in the repositories or when switching between targets to analyze. Reviewed By: martinoluca Differential Revision: D3922548 fbshipit-source-id: ec550fa --- infer/lib/python/inferlib/bucklib.py | 89 +++++++++++++++------------- 1 file changed, 48 insertions(+), 41 deletions(-) diff --git a/infer/lib/python/inferlib/bucklib.py b/infer/lib/python/inferlib/bucklib.py index 68340f9b1..3fdffd5fc 100644 --- a/infer/lib/python/inferlib/bucklib.py +++ b/infer/lib/python/inferlib/bucklib.py @@ -273,7 +273,16 @@ def load_json_report(opened_jar): raise NotFoundInJar -def collect_results(args, start_time): +def get_output_jars(targets): + if len(targets) == 0: + return [] + else: + audit_output = subprocess.check_output( + ['buck', 'audit', 'classpath'] + targets) + return audit_output.strip().split('\n') + + +def collect_results(args, start_time, targets): """Walks through buck-gen, collects results for the different buck targets and stores them in in args.infer_out/results.csv. 
""" @@ -297,45 +306,43 @@ def collect_results(args, start_time): expected_analyzer = stats['normal']['analyzer'] expected_version = stats['normal']['infer_version'] - for root, _, files in os.walk(DEFAULT_BUCK_OUT_GEN): - for f in [f for f in files if f.endswith('.jar')]: - path = os.path.join(root, f) - try: - with zipfile.ZipFile(path) as jar: - # Accumulate integers and float values - target_stats = load_stats(jar) - - found_analyzer = target_stats['normal']['analyzer'] - found_version = target_stats['normal']['infer_version'] - - if (found_analyzer != expected_analyzer - or found_version != expected_version): - continue - else: - for type_k in ['int', 'float']: - items = target_stats.get(type_k, {}).items() - for key, value in items: - if not any(map(lambda r: r.match(key), + for path in get_output_jars(targets): + try: + with zipfile.ZipFile(path) as jar: + # Accumulate integers and float values + target_stats = load_stats(jar) + + found_analyzer = target_stats['normal']['analyzer'] + found_version = target_stats['normal']['infer_version'] + + if found_analyzer != expected_analyzer \ + or found_version != expected_version: + continue + else: + for type_k in ['int', 'float']: + items = target_stats.get(type_k, {}).items() + for key, value in items: + if not any(map(lambda r: r.match(key), accumulation_whitelist)): - old_value = stats[type_k].get(key, 0) - stats[type_k][key] = old_value + value - - csv_rows = load_csv_report(jar) - if len(csv_rows) > 0: - headers.append(csv_rows[0]) - for row in csv_rows[1:]: - all_csv_rows.add(tuple(row)) - - json_rows = load_json_report(jar) - for row in json_rows: - all_json_rows.add(json.dumps(row)) - - # Override normals - stats['normal'].update(target_stats.get('normal', {})) - except NotFoundInJar: - pass - except zipfile.BadZipfile: - logging.warn('Bad zip file %s', path) + old_value = stats[type_k].get(key, 0) + stats[type_k][key] = old_value + value + + csv_rows = load_csv_report(jar) + if len(csv_rows) > 0: + 
headers.append(csv_rows[0]) + for row in csv_rows[1:]: + all_csv_rows.add(tuple(row)) + + json_rows = load_json_report(jar) + for row in json_rows: + all_json_rows.add(json.dumps(row)) + + # Override normals + stats['normal'].update(target_stats.get('normal', {})) + except NotFoundInJar: + pass + except zipfile.BadZipfile: + logging.warn('Bad zip file %s', path) csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME) json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME) @@ -459,7 +466,7 @@ class Wrapper: def _collect_results(self, start_time): self.timer.start('Collecting results ...') - collect_results(self.infer_args, start_time) + collect_results(self.infer_args, start_time, self.normalized_targets) self.timer.stop('Done') def run(self): @@ -484,7 +491,7 @@ class Wrapper: buck_cmd = self.buck_cmd + javac_config subprocess.check_call(buck_cmd) self.timer.stop('Buck finished') - self._collect_results(start_time) + self._collect_results(start_time) return os.EX_OK except KeyboardInterrupt as e: self.timer.stop('Exiting')