Summary: public

This unclutters infer/bin/ and gives more structure to infer/lib/

Reviewed By: jeremydubreil

Differential Revision: D2605809

fb-gh-sync-id: 508fc2c

branch: master
parent: c602ce7a83
commit: 872ce8ea87
@@ -1,542 +1 @@
+../lib/python/BuckAnalyze
-#!/usr/bin/env python2.7
# Copyright (c) 2013 - present Facebook, Inc.
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# This source code is licensed under the BSD style license found in the
|
|
||||||
# LICENSE file in the root directory of this source tree. An additional grant
|
|
||||||
# of patent rights can be found in the PATENTS file in the same directory.
|
|
||||||
|
|
||||||
from __future__ import absolute_import
|
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import csv
|
|
||||||
import io
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import multiprocessing
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import re
|
|
||||||
import shutil
|
|
||||||
import stat
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import tempfile
|
|
||||||
import time
|
|
||||||
import traceback
|
|
||||||
import zipfile
|
|
||||||
|
|
||||||
# Infer imports
|
|
||||||
import inferlib
|
|
||||||
import utils
|
|
||||||
|
|
||||||
ANALYSIS_SUMMARY_OUTPUT = 'analysis_summary.txt'
|
|
||||||
|
|
||||||
BUCK_CONFIG = '.buckconfig.local'
|
|
||||||
BUCK_CONFIG_BACKUP = '.buckconfig.local.backup_generated_by_infer'
|
|
||||||
DEFAULT_BUCK_OUT = os.path.join(os.getcwd(), 'buck-out')
|
|
||||||
DEFAULT_BUCK_OUT_GEN = os.path.join(DEFAULT_BUCK_OUT, 'gen')
|
|
||||||
|
|
||||||
INFER_REPORT = os.path.join(utils.BUCK_INFER_OUT, utils.CSV_REPORT_FILENAME)
|
|
||||||
INFER_STATS = os.path.join(utils.BUCK_INFER_OUT, utils.STATS_FILENAME)
|
|
||||||
|
|
||||||
INFER_SCRIPT = """\
|
|
||||||
#!/usr/bin/env {0}
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
cmd = ['{0}'] + {1} + ['--', 'javac'] + sys.argv[1:]
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
"""
|
|
||||||
|
|
||||||
LOCAL_CONFIG = """\
|
|
||||||
[tools]
|
|
||||||
javac = %s
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def prepare_build(args):
|
|
||||||
"""Creates script that redirects javac calls to infer and a local buck
|
|
||||||
configuration that tells buck to use that script.
|
|
||||||
"""
|
|
||||||
|
|
||||||
infer_options = [
|
|
||||||
'--buck',
|
|
||||||
'--incremental',
|
|
||||||
'--analyzer', args.analyzer,
|
|
||||||
]
|
|
||||||
|
|
||||||
if args.debug:
|
|
||||||
infer_options.append('--debug')
|
|
||||||
|
|
||||||
if args.no_filtering:
|
|
||||||
infer_options.append('--no-filtering')
|
|
||||||
|
|
||||||
if args.infer_out is not None:
|
|
||||||
infer_options += ['--out', args.infer_out]
|
|
||||||
|
|
||||||
# Create a temporary directory as a cache for jar files.
|
|
||||||
infer_cache_dir = os.path.join(args.infer_out, 'cache')
|
|
||||||
if not os.path.isdir(infer_cache_dir):
|
|
||||||
os.mkdir(infer_cache_dir)
|
|
||||||
infer_options.append('--infer_cache')
|
|
||||||
infer_options.append(infer_cache_dir)
|
|
||||||
temp_files = [infer_cache_dir]
|
|
||||||
|
|
||||||
try:
|
|
||||||
infer = [utils.get_cmd_in_bin_dir('infer')] + infer_options
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
logging.error('Could not find infer')
|
|
||||||
raise e
|
|
||||||
|
|
||||||
# Disable the use of buckd as this script modifies .buckconfig.local
|
|
||||||
logging.info('Disabling buckd: export NO_BUCKD=1')
|
|
||||||
os.environ['NO_BUCKD'] = '1'
|
|
||||||
|
|
||||||
# make sure INFER_ANALYSIS is set when buck is called
|
|
||||||
logging.info('Setup Infer analysis mode for Buck: export INFER_ANALYSIS=1')
|
|
||||||
os.environ['INFER_ANALYSIS'] = '1'
|
|
||||||
|
|
||||||
# Create a script to be called by buck
|
|
||||||
infer_script = None
|
|
||||||
with tempfile.NamedTemporaryFile(delete=False,
|
|
||||||
prefix='infer_',
|
|
||||||
suffix='.py',
|
|
||||||
dir='.') as infer_script:
|
|
||||||
logging.info('Creating %s' % infer_script.name)
|
|
||||||
infer_script.file.write(
|
|
||||||
(INFER_SCRIPT.format(sys.executable, infer)).encode())
|
|
||||||
|
|
||||||
st = os.stat(infer_script.name)
|
|
||||||
os.chmod(infer_script.name, st.st_mode | stat.S_IEXEC)
|
|
||||||
|
|
||||||
# Backup and patch local buck config
|
|
||||||
patched_config = ''
|
|
||||||
if os.path.isfile(BUCK_CONFIG):
|
|
||||||
logging.info('Backing up %s to %s', BUCK_CONFIG, BUCK_CONFIG_BACKUP)
|
|
||||||
shutil.move(BUCK_CONFIG, BUCK_CONFIG_BACKUP)
|
|
||||||
with open(BUCK_CONFIG_BACKUP) as buckconfig:
|
|
||||||
patched_config = '\n'.join(buckconfig)
|
|
||||||
|
|
||||||
javac_section = '[tools]\n{0}javac = {1}'.format(
|
|
||||||
' ' * 4,
|
|
||||||
infer_script.name)
|
|
||||||
patched_config += javac_section
|
|
||||||
with open(BUCK_CONFIG, 'w') as buckconfig:
|
|
||||||
buckconfig.write(patched_config)
|
|
||||||
|
|
||||||
temp_files += [infer_script.name]
|
|
||||||
return temp_files
|
|
||||||
|
|
||||||
|
|
||||||
def java_targets():
|
|
||||||
target_types = [
|
|
||||||
'android_library',
|
|
||||||
'java_library',
|
|
||||||
]
|
|
||||||
try:
|
|
||||||
targets = subprocess.check_output([
|
|
||||||
'buck',
|
|
||||||
'targets',
|
|
||||||
'--type',
|
|
||||||
] + target_types).decode().strip().split('\n')
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
logging.error('Could not compute java library targets')
|
|
||||||
raise e
|
|
||||||
return set(targets)
|
|
||||||
|
|
||||||
|
|
||||||
def is_alias(target):
|
|
||||||
return ':' not in target
|
|
||||||
|
|
||||||
|
|
||||||
def expand_target(target, java_targets):
|
|
||||||
if not is_alias(target):
|
|
||||||
return [target]
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
buck_audit_cmd = ['buck', 'audit', 'classpath', '--dot', target]
|
|
||||||
output = subprocess.check_output(buck_audit_cmd)
|
|
||||||
dotty = output.decode().split('\n')
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
logging.error('Could not expand target {0}'.format(target))
|
|
||||||
raise e
|
|
||||||
targets = set()
|
|
||||||
edge_re = re.compile('.*"(.*)".*"(.*)".*')
|
|
||||||
for line in dotty:
|
|
||||||
match = re.match(edge_re, line)
|
|
||||||
if match:
|
|
||||||
for t in match.groups():
|
|
||||||
if t in java_targets:
|
|
||||||
targets.add(t)
|
|
||||||
return targets
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_target(target):
|
|
||||||
if is_alias(target) or target.startswith('//'):
|
|
||||||
return target
|
|
||||||
else:
|
|
||||||
return '//' + target
|
|
||||||
|
|
||||||
|
|
||||||
def determine_library_targets(args):
|
|
||||||
""" Uses git and buck audit to expand aliases into the list of java or
|
|
||||||
android library targets that are parts of these aliases.
|
|
||||||
Buck targets directly passed as argument are not expanded """
|
|
||||||
|
|
||||||
args.targets = [normalize_target(t) for t in args.targets]
|
|
||||||
|
|
||||||
if any(map(is_alias, args.targets)):
|
|
||||||
all_java_targets = java_targets()
|
|
||||||
targets = set()
|
|
||||||
for t in args.targets:
|
|
||||||
targets.update(expand_target(t, all_java_targets))
|
|
||||||
args.targets = list(targets)
|
|
||||||
|
|
||||||
if args.verbose:
|
|
||||||
logging.debug('Targets to analyze:')
|
|
||||||
for target in args.targets:
|
|
||||||
logging.debug(target)
|
|
||||||
|
|
||||||
|
|
||||||
def init_stats(args, start_time):
|
|
||||||
"""Returns dictionary with target independent statistics.
|
|
||||||
"""
|
|
||||||
return {
|
|
||||||
'float': {},
|
|
||||||
'int': {
|
|
||||||
'cores': multiprocessing.cpu_count(),
|
|
||||||
'time': int(time.time()),
|
|
||||||
'start_time': int(round(start_time)),
|
|
||||||
},
|
|
||||||
'normal': {
|
|
||||||
'debug': str(args.debug),
|
|
||||||
'analyzer': args.analyzer,
|
|
||||||
'machine': platform.machine(),
|
|
||||||
'node': platform.node(),
|
|
||||||
'project': os.path.basename(os.getcwd()),
|
|
||||||
'revision': utils.vcs_revision(),
|
|
||||||
'branch': utils.vcs_branch(),
|
|
||||||
'system': platform.system(),
|
|
||||||
'infer_version': utils.infer_version(),
|
|
||||||
'infer_branch': utils.infer_branch(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def store_performances_csv(infer_out, stats):
|
|
||||||
"""Stores the statistics about perfromances into a CSV file to be exported
|
|
||||||
to a database"""
|
|
||||||
perf_filename = os.path.join(infer_out, utils.CSV_PERF_FILENAME)
|
|
||||||
with open(perf_filename, 'w') as csv_file_out:
|
|
||||||
csv_writer = csv.writer(csv_file_out)
|
|
||||||
keys = ['infer_version', 'project', 'revision', 'files', 'lines',
|
|
||||||
'cores', 'system', 'machine', 'node', 'total_time',
|
|
||||||
'capture_time', 'analysis_time', 'reporting_time', 'time']
|
|
||||||
int_stats = list(stats['int'].items())
|
|
||||||
normal_stats = list(stats['normal'].items())
|
|
||||||
flat_stats = dict(int_stats + normal_stats)
|
|
||||||
values = []
|
|
||||||
for key in keys:
|
|
||||||
values.append(flat_stats[key])
|
|
||||||
csv_writer.writerow(keys)
|
|
||||||
csv_writer.writerow(values)
|
|
||||||
csv_file_out.flush()
|
|
||||||
|
|
||||||
|
|
||||||
def get_harness_code():
|
|
||||||
all_harness_code = '\nGenerated harness code:\n'
|
|
||||||
for filename in os.listdir(DEFAULT_BUCK_OUT_GEN):
|
|
||||||
if 'InferGeneratedHarness' in filename:
|
|
||||||
all_harness_code += '\n' + filename + ':\n'
|
|
||||||
with open(os.path.join(DEFAULT_BUCK_OUT_GEN,
|
|
||||||
filename), 'r') as file_in:
|
|
||||||
all_harness_code += file_in.read()
|
|
||||||
return all_harness_code + '\n'
|
|
||||||
|
|
||||||
|
|
||||||
def get_basic_stats(stats):
|
|
||||||
files_analyzed = '{0} files ({1} lines) analyzed in {2}s\n\n'.format(
|
|
||||||
stats['int']['files'],
|
|
||||||
stats['int']['lines'],
|
|
||||||
stats['int']['total_time'],
|
|
||||||
)
|
|
||||||
phase_times = 'Capture time: {0}s\nAnalysis time: {1}s\n\n'.format(
|
|
||||||
stats['int']['capture_time'],
|
|
||||||
stats['int']['analysis_time'],
|
|
||||||
)
|
|
||||||
|
|
||||||
to_skip = {
|
|
||||||
'files',
|
|
||||||
'procedures',
|
|
||||||
'lines',
|
|
||||||
'cores',
|
|
||||||
'time',
|
|
||||||
'start_time',
|
|
||||||
'capture_time',
|
|
||||||
'analysis_time',
|
|
||||||
'reporting_time',
|
|
||||||
'total_time',
|
|
||||||
'makefile_generation_time'
|
|
||||||
}
|
|
||||||
bugs_found = 'Errors found:\n\n'
|
|
||||||
for key, value in sorted(stats['int'].items()):
|
|
||||||
if key not in to_skip:
|
|
||||||
bugs_found += ' {0:>8} {1}\n'.format(value, key)
|
|
||||||
|
|
||||||
basic_stats_message = files_analyzed + phase_times + bugs_found + '\n'
|
|
||||||
return basic_stats_message
|
|
||||||
|
|
||||||
|
|
||||||
def get_buck_stats():
|
|
||||||
trace_filename = os.path.join(
|
|
||||||
DEFAULT_BUCK_OUT,
|
|
||||||
'log',
|
|
||||||
'traces',
|
|
||||||
'build.trace'
|
|
||||||
)
|
|
||||||
ARGS = 'args'
|
|
||||||
SUCCESS_STATUS = 'success_type'
|
|
||||||
buck_stats = {}
|
|
||||||
try:
|
|
||||||
with open(trace_filename, 'r') as file_in:
|
|
||||||
trace = json.load(file_in)
|
|
||||||
for t in trace:
|
|
||||||
if SUCCESS_STATUS in t[ARGS]:
|
|
||||||
status = t[ARGS][SUCCESS_STATUS]
|
|
||||||
count = buck_stats.get(status, 0)
|
|
||||||
buck_stats[status] = count + 1
|
|
||||||
|
|
||||||
buck_stats_message = 'Buck build statistics:\n\n'
|
|
||||||
for key, value in sorted(buck_stats.items()):
|
|
||||||
buck_stats_message += ' {0:>8} {1}\n'.format(value, key)
|
|
||||||
|
|
||||||
return buck_stats_message
|
|
||||||
except IOError as e:
|
|
||||||
logging.error('Caught %s: %s' % (e.__class__.__name__, str(e)))
|
|
||||||
logging.error(traceback.format_exc())
|
|
||||||
return ''
|
|
||||||
|
|
||||||
|
|
||||||
class NotFoundInJar(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def load_stats(opened_jar):
|
|
||||||
try:
|
|
||||||
return json.loads(opened_jar.read(INFER_STATS).decode())
|
|
||||||
except KeyError as e:
|
|
||||||
raise NotFoundInJar
|
|
||||||
|
|
||||||
|
|
||||||
def load_report(opened_jar):
|
|
||||||
try:
|
|
||||||
sio = io.StringIO(opened_jar.read(INFER_REPORT).decode())
|
|
||||||
return list(csv.reader(sio))
|
|
||||||
except KeyError as e:
|
|
||||||
raise NotFoundInJar
|
|
||||||
|
|
||||||
|
|
||||||
def rows_remove_duplicates(rows):
|
|
||||||
seen = {}
|
|
||||||
result = []
|
|
||||||
for row in rows:
|
|
||||||
t = tuple(row)
|
|
||||||
if t in seen:
|
|
||||||
continue
|
|
||||||
seen[t] = 1
|
|
||||||
result.append(row)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def collect_results(args, start_time):
|
|
||||||
"""Walks through buck-gen, collects results for the different buck targets
|
|
||||||
and stores them in in args.infer_out/results.csv.
|
|
||||||
"""
|
|
||||||
buck_stats = get_buck_stats()
|
|
||||||
logging.info(buck_stats)
|
|
||||||
with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
|
|
||||||
f.write(buck_stats)
|
|
||||||
|
|
||||||
all_rows = []
|
|
||||||
headers = []
|
|
||||||
stats = init_stats(args, start_time)
|
|
||||||
|
|
||||||
accumulation_whitelist = list(map(re.compile, [
|
|
||||||
'^cores$',
|
|
||||||
'^time$',
|
|
||||||
'^start_time$',
|
|
||||||
'.*_pc',
|
|
||||||
]))
|
|
||||||
|
|
||||||
expected_analyzer = stats['normal']['analyzer']
|
|
||||||
expected_version = stats['normal']['infer_version']
|
|
||||||
|
|
||||||
for root, _, files in os.walk(DEFAULT_BUCK_OUT_GEN):
|
|
||||||
for f in [f for f in files if f.endswith('.jar')]:
|
|
||||||
path = os.path.join(root, f)
|
|
||||||
try:
|
|
||||||
with zipfile.ZipFile(path) as jar:
|
|
||||||
# Accumulate integers and float values
|
|
||||||
target_stats = load_stats(jar)
|
|
||||||
|
|
||||||
found_analyzer = target_stats['normal']['analyzer']
|
|
||||||
found_version = target_stats['normal']['infer_version']
|
|
||||||
|
|
||||||
if (found_analyzer != expected_analyzer
|
|
||||||
or found_version != expected_version):
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
for type_k in ['int', 'float']:
|
|
||||||
items = target_stats.get(type_k, {}).items()
|
|
||||||
for key, value in items:
|
|
||||||
if not any(map(lambda r: r.match(key),
|
|
||||||
accumulation_whitelist)):
|
|
||||||
old_value = stats[type_k].get(key, 0)
|
|
||||||
stats[type_k][key] = old_value + value
|
|
||||||
|
|
||||||
rows = load_report(jar)
|
|
||||||
if len(rows) > 0:
|
|
||||||
headers.append(rows[0])
|
|
||||||
all_rows.extend(rows[1:])
|
|
||||||
|
|
||||||
# Override normals
|
|
||||||
stats['normal'].update(target_stats.get('normal', {}))
|
|
||||||
except NotFoundInJar:
|
|
||||||
pass
|
|
||||||
except zipfile.BadZipfile:
|
|
||||||
logging.warn('Bad zip file %s', path)
|
|
||||||
|
|
||||||
csv_report = os.path.join(args.infer_out, utils.CSV_REPORT_FILENAME)
|
|
||||||
bugs_out = os.path.join(args.infer_out, utils.BUGS_FILENAME)
|
|
||||||
|
|
||||||
if len(headers) == 0:
|
|
||||||
with open(csv_report, 'w'):
|
|
||||||
pass
|
|
||||||
logging.info('No reports found')
|
|
||||||
return
|
|
||||||
elif len(headers) > 1:
|
|
||||||
if any(map(lambda x: x != headers[0], headers)):
|
|
||||||
raise Exception('Inconsistent reports found')
|
|
||||||
|
|
||||||
# Convert all float values to integer values
|
|
||||||
for key, value in stats.get('float', {}).items():
|
|
||||||
stats['int'][key] = int(round(value))
|
|
||||||
|
|
||||||
# Delete the float entries before exporting the results
|
|
||||||
del(stats['float'])
|
|
||||||
|
|
||||||
with open(csv_report, 'w') as report:
|
|
||||||
writer = csv.writer(report)
|
|
||||||
writer.writerows([headers[0]] + rows_remove_duplicates(all_rows))
|
|
||||||
report.flush()
|
|
||||||
|
|
||||||
# export the CSV rows to JSON
|
|
||||||
utils.create_json_report(args.infer_out)
|
|
||||||
|
|
||||||
print('\n')
|
|
||||||
inferlib.print_errors(csv_report, bugs_out)
|
|
||||||
|
|
||||||
stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))
|
|
||||||
|
|
||||||
store_performances_csv(args.infer_out, stats)
|
|
||||||
|
|
||||||
stats_filename = os.path.join(args.infer_out, utils.STATS_FILENAME)
|
|
||||||
with open(stats_filename, 'w') as stats_out:
|
|
||||||
json.dump(stats, stats_out, indent=2)
|
|
||||||
|
|
||||||
basic_stats = get_basic_stats(stats)
|
|
||||||
|
|
||||||
if args.print_harness:
|
|
||||||
harness_code = get_harness_code()
|
|
||||||
basic_stats += harness_code
|
|
||||||
|
|
||||||
logging.info(basic_stats)
|
|
||||||
|
|
||||||
with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'a') as f:
|
|
||||||
f.write(basic_stats)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup(temp_files):
|
|
||||||
"""Removes the generated .buckconfig.local and the temporary infer script.
|
|
||||||
"""
|
|
||||||
for file in [BUCK_CONFIG] + temp_files:
|
|
||||||
try:
|
|
||||||
logging.info('Removing %s' % file)
|
|
||||||
if os.path.isdir(file):
|
|
||||||
shutil.rmtree(file)
|
|
||||||
else:
|
|
||||||
os.unlink(file)
|
|
||||||
except IOError:
|
|
||||||
logging.error('Could not remove %s' % file)
|
|
||||||
|
|
||||||
if os.path.isfile(BUCK_CONFIG_BACKUP):
|
|
||||||
logging.info('Restoring %s', BUCK_CONFIG)
|
|
||||||
shutil.move(BUCK_CONFIG_BACKUP, BUCK_CONFIG)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
parser = argparse.ArgumentParser(parents=[inferlib.base_parser])
|
|
||||||
parser.add_argument('--verbose', action='store_true',
|
|
||||||
help='Print buck compilation steps')
|
|
||||||
parser.add_argument('--no-cache', action='store_true',
|
|
||||||
help='Do not use buck distributed cache')
|
|
||||||
parser.add_argument('--print-harness', action='store_true',
|
|
||||||
help='Print generated harness code (Android only)')
|
|
||||||
parser.add_argument('targets', nargs='*', metavar='target',
|
|
||||||
help='Build targets to analyze')
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
utils.configure_logging(args.verbose)
|
|
||||||
timer = utils.Timer(logging.info)
|
|
||||||
temp_files = []
|
|
||||||
|
|
||||||
try:
|
|
||||||
start_time = time.time()
|
|
||||||
logging.info('Starting the analysis')
|
|
||||||
subprocess.check_call(
|
|
||||||
[utils.get_cmd_in_bin_dir('InferAnalyze'), '-version'])
|
|
||||||
|
|
||||||
if not os.path.isdir(args.infer_out):
|
|
||||||
os.mkdir(args.infer_out)
|
|
||||||
|
|
||||||
timer.start('Preparing build...')
|
|
||||||
temp_files += prepare_build(args)
|
|
||||||
timer.stop('Build prepared')
|
|
||||||
|
|
||||||
# TODO(t3786463) Start buckd.
|
|
||||||
|
|
||||||
timer.start('Computing library targets')
|
|
||||||
determine_library_targets(args)
|
|
||||||
timer.stop('%d targets computed', len(args.targets))
|
|
||||||
|
|
||||||
timer.start('Running buck...')
|
|
||||||
buck_cmd = ['buck', 'build']
|
|
||||||
if args.no_cache:
|
|
||||||
buck_cmd += ['--no-cache']
|
|
||||||
if args.verbose:
|
|
||||||
buck_cmd += ['-v', '2']
|
|
||||||
subprocess.check_call(buck_cmd + args.targets)
|
|
||||||
timer.stop('Buck finished')
|
|
||||||
|
|
||||||
timer.start('Collecting results...')
|
|
||||||
collect_results(args, start_time)
|
|
||||||
timer.stop('Done')
|
|
||||||
|
|
||||||
except KeyboardInterrupt as e:
|
|
||||||
timer.stop('Exiting')
|
|
||||||
sys.exit(0)
|
|
||||||
except Exception as e:
|
|
||||||
timer.stop('Failed')
|
|
||||||
logging.error('Caught %s: %s' % (e.__class__.__name__, str(e)))
|
|
||||||
logging.error(traceback.format_exc())
|
|
||||||
sys.exit(1)
|
|
||||||
finally:
|
|
||||||
cleanup(temp_files)
|
|
||||||
|
|
||||||
|
|
||||||
# vim: set sw=4 ts=4 et:
|
|
@@ -1,180 +1 @@
+../lib/python/infer
-#!/usr/bin/env python2.7
import argparse
|
|
||||||
import imp
|
|
||||||
import utils
|
|
||||||
import inferlib
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
|
|
||||||
CAPTURE_PACKAGE = 'capture'
|
|
||||||
LIB_FOLDER = os.path.join(
|
|
||||||
os.path.dirname(os.path.realpath(__file__)), os.path.pardir, 'lib')
|
|
||||||
|
|
||||||
# token that identifies the end of the options for infer and the beginning
|
|
||||||
# of the compilation command
|
|
||||||
CMD_MARKER = '--'
|
|
||||||
|
|
||||||
# insert here the correspondence between a module name and the list of
# compilers/build-systems it handles.
# All supported commands should be listed here.
|
|
||||||
MODULE_TO_COMMAND = {
|
|
||||||
'ant': ['ant'],
|
|
||||||
'analyze': ['analyze'],
|
|
||||||
'buck': ['buck'],
|
|
||||||
'gradle': ['gradle', 'gradlew'],
|
|
||||||
'javac': ['javac'],
|
|
||||||
'make': ['make', 'clang', 'clang++', 'cc', 'gcc', 'g++'],
|
|
||||||
'xcodebuild': ['xcodebuild'],
|
|
||||||
'mvn': ['mvn']
|
|
||||||
}
|
|
||||||
|
|
||||||
FORMAT = '[%(levelname)s] %(message)s'
|
|
||||||
LOG_FILE = 'toplevel.log'
|
|
||||||
|
|
||||||
def get_commands():
|
|
||||||
"""Return all commands that are supported."""
|
|
||||||
#flatten and dedup the list of commands
|
|
||||||
return set(sum(MODULE_TO_COMMAND.values(), []))
|
|
||||||
|
|
||||||
|
|
||||||
def get_module_name(command):
|
|
||||||
""" Return module that is able to handle the command. None if
|
|
||||||
there is no such module."""
|
|
||||||
for module, commands in MODULE_TO_COMMAND.iteritems():
|
|
||||||
if command in commands:
|
|
||||||
return module
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def load_module(mod_name):
|
|
||||||
# load the 'capture' package in lib
|
|
||||||
pkg_info = imp.find_module(CAPTURE_PACKAGE, [LIB_FOLDER])
|
|
||||||
imported_pkg = imp.load_module(CAPTURE_PACKAGE, *pkg_info)
|
|
||||||
# load the requested module (e.g. make)
|
|
||||||
mod_file, mod_path, mod_descr = \
|
|
||||||
imp.find_module(mod_name, imported_pkg.__path__)
|
|
||||||
try:
|
|
||||||
return imp.load_module(
|
|
||||||
'{pkg}.{mod}'.format(pkg=imported_pkg.__name__, mod=mod_name),
|
|
||||||
mod_file, mod_path, mod_descr)
|
|
||||||
finally:
|
|
||||||
if mod_file:
|
|
||||||
mod_file.close()
|
|
||||||
|
|
||||||
|
|
||||||
def split_args_to_parse():
|
|
||||||
dd_index = \
|
|
||||||
sys.argv.index(CMD_MARKER) if CMD_MARKER in sys.argv else len(sys.argv)
|
|
||||||
return sys.argv[1:dd_index], sys.argv[dd_index + 1:]
|
|
||||||
|
|
||||||
|
|
||||||
def create_argparser(parents=[]):
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
parents=[inferlib.infer_parser] + parents,
|
|
||||||
add_help=False,
|
|
||||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
||||||
)
|
|
||||||
group = parser.add_argument_group(
|
|
||||||
'supported compiler/build-system commands')
|
|
||||||
|
|
||||||
supported_commands = ', '.join(get_commands())
|
|
||||||
group.add_argument(
|
|
||||||
CMD_MARKER,
|
|
||||||
metavar='<cmd>',
|
|
||||||
dest='nullarg',
|
|
||||||
default=None,
|
|
||||||
help=('Command to run the compiler/build-system. '
|
|
||||||
'Supported build commands (run `infer --help -- <cmd_name>` for '
|
|
||||||
'extra help, e.g. `infer --help -- javac`): ' + supported_commands),
|
|
||||||
)
|
|
||||||
return parser
|
|
||||||
|
|
||||||
|
|
||||||
def configure_logging(infer_dir, log_to_stderr):
|
|
||||||
if log_to_stderr:
|
|
||||||
logging.basicConfig(level=logging.INFO, format=FORMAT)
|
|
||||||
else:
|
|
||||||
logging.basicConfig(level=logging.INFO,
|
|
||||||
format=FORMAT,
|
|
||||||
filename=os.path.join(infer_dir, LOG_FILE),
|
|
||||||
filemode='w')
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
to_parse, cmd = split_args_to_parse()
|
|
||||||
# get the module name (if any), then load it
|
|
||||||
capture_module_name = os.path.basename(cmd[0]) if len(cmd) > 0 else None
|
|
||||||
mod_name = get_module_name(capture_module_name)
|
|
||||||
imported_module = None
|
|
||||||
if mod_name:
|
|
||||||
# There is a module that supports the command
|
|
||||||
imported_module = load_module(mod_name)
|
|
||||||
|
|
||||||
# get the module's argparser and merge it with the global argparser
|
|
||||||
module_argparser = []
|
|
||||||
if imported_module:
|
|
||||||
module_argparser.append(
|
|
||||||
imported_module.create_argparser(capture_module_name)
|
|
||||||
)
|
|
||||||
global_argparser = create_argparser(module_argparser)
|
|
||||||
|
|
||||||
args = global_argparser.parse_args(to_parse)
|
|
||||||
|
|
||||||
if (imported_module and not args.incremental and
|
|
||||||
capture_module_name != 'analyze'):
|
|
||||||
inferlib.remove_infer_out(args.infer_out)
|
|
||||||
|
|
||||||
inferlib.create_results_dir(args.infer_out)
|
|
||||||
|
|
||||||
configure_logging(args.infer_out, args.log_to_stderr)
|
|
||||||
logging.info('Running command %s', ' '.join(sys.argv))
|
|
||||||
logging.info('Path to infer script %s (%s)', __file__,
|
|
||||||
os.path.realpath(__file__))
|
|
||||||
logging.info(inferlib.get_infer_version())
|
|
||||||
logging.info('Platform: %s', platform.platform())
|
|
||||||
logging.info('PATH=%s', os.getenv('PATH'))
|
|
||||||
logging.info('SHELL=%s', os.getenv('SHELL'))
|
|
||||||
logging.info('PWD=%s', os.getenv('PWD'))
|
|
||||||
|
|
||||||
if imported_module:
|
|
||||||
capture_exitcode = imported_module.gen_instance(args, cmd).capture()
|
|
||||||
if capture_exitcode != os.EX_OK:
|
|
||||||
logging.error('Error during capture phase, exiting')
|
|
||||||
exit(capture_exitcode)
|
|
||||||
logging.info('Capture phase was successful')
|
|
||||||
elif capture_module_name is not None:
|
|
||||||
# There was a command, but it's not supported
|
|
||||||
print('Command "{cmd}" not recognised'.format(
|
|
||||||
cmd='' if capture_module_name is None else capture_module_name))
|
|
||||||
global_argparser.print_help()
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
global_argparser.print_help()
|
|
||||||
sys.exit(os.EX_OK)
|
|
||||||
|
|
||||||
if not (mod_name == 'buck' or mod_name == 'javac'):
|
|
||||||
# Something should already be captured, otherwise the analysis would fail
|
|
||||||
if not os.path.exists(os.path.join(args.infer_out, 'captured')):
|
|
||||||
print('There was nothing to analyze, exiting')
|
|
||||||
exit(os.EX_USAGE)
|
|
||||||
analysis = inferlib.Infer(args, [])
|
|
||||||
analysis.analyze_and_report()
|
|
||||||
analysis.save_stats()
|
|
||||||
|
|
||||||
if args.fail_on_bug:
|
|
||||||
bugs_filename = os.path.join(args.infer_out,
|
|
||||||
utils.JSON_REPORT_FILENAME)
|
|
||||||
try:
|
|
||||||
with open(bugs_filename) as bugs_file:
|
|
||||||
bugs = json.load(bugs_file)
|
|
||||||
if len(bugs) > 0:
|
|
||||||
sys.exit(inferlib.BUG_FOUND_ERROR_CODE)
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
@@ -1,399 +1 @@
+../lib/python/inferTraceBugs
-#!/usr/bin/env python2.7
# Copyright (c) 2013 - present Facebook, Inc.
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# This source code is licensed under the BSD style license found in the
|
|
||||||
# LICENSE file in the root directory of this source tree. An additional grant
|
|
||||||
# of patent rights can be found in the PATENTS file in the same directory.
|
|
||||||
|
|
||||||
from __future__ import absolute_import
|
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
# Infer imports
|
|
||||||
import utils
|
|
||||||
import inferlib
|
|
||||||
|
|
||||||
HTML_REPORT_DIR = 'report.html'
|
|
||||||
TRACES_REPORT_DIR = 'traces'
|
|
||||||
SOURCE_REMOTE_GITHUB_URL_TEMPLATE = ('https://github.com/{project}/blob/'
|
|
||||||
'{hash}/{relative-path}/'
|
|
||||||
'{file-name}#L{line-number}')
|
|
||||||
SOURCE_REMOTE_GITHUB_RE = re.compile('.*github.com[:/](?P<project>.*)')
|
|
||||||
|
|
||||||
|
|
||||||
base_parser = argparse.ArgumentParser(
|
|
||||||
description='Explore the error traces in Infer reports.')
|
|
||||||
base_parser.add_argument('-o', '--out', metavar='<directory>',
|
|
||||||
default=utils.DEFAULT_INFER_OUT, dest='infer_out',
|
|
||||||
action=utils.AbsolutePathAction,
|
|
||||||
help='Set the Infer results directory')
|
|
||||||
base_parser.add_argument('--only-show',
|
|
||||||
action='store_true',
|
|
||||||
help='Show the list of reports and exit')
|
|
||||||
base_parser.add_argument('--no-source',
|
|
||||||
action='store_true',
|
|
||||||
help='Do not print code excerpts')
|
|
||||||
base_parser.add_argument('--select',
|
|
||||||
metavar='N',
|
|
||||||
nargs=1,
|
|
||||||
help='Select bug number N. '
|
|
||||||
'If omitted, prompts you for input.')
|
|
||||||
base_parser.add_argument('--max-level',
|
|
||||||
metavar='N',
|
|
||||||
nargs=1,
|
|
||||||
help='Level of nested procedure calls to show. '
|
|
||||||
'Can be "max", in which case all levels are shown. '
|
|
||||||
'If omitted, prompts you for input.')
|
|
||||||
base_parser.add_argument('--html',
|
|
||||||
action='store_true',
|
|
||||||
help='Generate HTML report.')
|
|
||||||
|
|
||||||
|
|
||||||
def describe_report(report, indent=0):
|
|
||||||
filename = report['file']
|
|
||||||
kind = report['kind']
|
|
||||||
line = report['line']
|
|
||||||
error_type = report['type']
|
|
||||||
msg = report['qualifier']
|
|
||||||
return '{0}:{1}: {2}: {3}\n {4}{5}\n'.format(
|
|
||||||
filename,
|
|
||||||
line,
|
|
||||||
kind.lower(),
|
|
||||||
error_type,
|
|
||||||
' ' * indent,
|
|
||||||
msg,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def show_error_and_exit(err, show_help):
|
|
||||||
print(err)
|
|
||||||
if show_help:
|
|
||||||
print('')
|
|
||||||
base_parser.print_help()
|
|
||||||
exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
class Tracer(object):
|
|
||||||
def __init__(self, args, level=sys.maxsize):
|
|
||||||
self.args = args
|
|
||||||
self.max_level = level
|
|
||||||
self.indenter = utils.Indenter()
|
|
||||||
|
|
||||||
def build_node_tags(self, node):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def build_node(self, node):
|
|
||||||
if node['level'] > self.max_level:
|
|
||||||
return
|
|
||||||
|
|
||||||
report_line = node['line_number']
|
|
||||||
fname = node['filename']
|
|
||||||
|
|
||||||
self.indenter.newline()
|
|
||||||
self.indenter.add('%s:%d: %s' % (fname,
|
|
||||||
report_line,
|
|
||||||
node['description']))
|
|
||||||
self.indenter.newline()
|
|
||||||
|
|
||||||
if not self.args.no_source:
|
|
||||||
self.indenter.indent_push(node['level'])
|
|
||||||
self.indenter.add(utils.build_source_context(fname, report_line))
|
|
||||||
self.indenter.indent_pop()
|
|
||||||
self.indenter.newline()
|
|
||||||
|
|
||||||
def build_trace(self, trace):
|
|
||||||
total_nodes = len(trace)
|
|
||||||
hidden_nodes = len([None for n in trace if n['level'] > self.max_level])
|
|
||||||
shown_nodes = total_nodes - hidden_nodes
|
|
||||||
hidden_str = ''
|
|
||||||
all_str = 'all '
|
|
||||||
if hidden_nodes > 0:
|
|
||||||
hidden_str = ' (%d steps too deeply nested)' % hidden_nodes
|
|
||||||
all_str = ''
|
|
||||||
self.indenter.add('Showing %s%d steps of the trace%s\n\n'
|
|
||||||
% (all_str, shown_nodes, hidden_str))
|
|
||||||
self.indenter.newline()
|
|
||||||
|
|
||||||
for node in trace:
|
|
||||||
self.build_node(node)
|
|
||||||
|
|
||||||
def build_report(self, report):
|
|
||||||
traces = json.loads(report['trace'])
|
|
||||||
self.build_trace(traces['trace'])
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return str(self.indenter)
|
|
||||||
|
|
||||||
|
|
||||||
class Selector(object):
|
|
||||||
def __init__(self, args, reports):
|
|
||||||
self.args = args
|
|
||||||
|
|
||||||
def has_trace(report):
|
|
||||||
trace = json.loads(report['trace'])
|
|
||||||
return len(trace['trace']) > 0
|
|
||||||
self.reports = [report for report in reports if has_trace(report)]
|
|
||||||
|
|
||||||
def show_choices(self):
|
|
||||||
n = 0
|
|
||||||
n_length = len(str(len(self)))
|
|
||||||
for report in self.reports:
|
|
||||||
print(str(n).rjust(n_length) + '. ' +
|
|
||||||
describe_report(report, n_length + 2))
|
|
||||||
n += 1
|
|
||||||
|
|
||||||
def prompt_report(self):
|
|
||||||
report_number = 0
|
|
||||||
if self.args.select is not None:
|
|
||||||
report_number = self.parse_report_number(self.args.select[0], True)
|
|
||||||
else:
|
|
||||||
self.show_choices()
|
|
||||||
|
|
||||||
if len(self) > 1:
|
|
||||||
report_number_str = raw_input(
|
|
||||||
'Choose report to display (default=0): ')
|
|
||||||
if report_number_str != '':
|
|
||||||
report_number = self.parse_report_number(report_number_str)
|
|
||||||
elif len(self) == 1:
|
|
||||||
print('Auto-selecting the only report.')
|
|
||||||
|
|
||||||
return self.reports[report_number]
|
|
||||||
|
|
||||||
def prompt_level(self):
|
|
||||||
if self.args.max_level is not None:
|
|
||||||
return self.parse_max_level(self.args.max_level[0], True)
|
|
||||||
|
|
||||||
max_level_str = raw_input(
|
|
||||||
'Choose the maximum level of nested procedure calls (default=max): ')
|
|
||||||
if max_level_str == '':
|
|
||||||
max_level = sys.maxsize
|
|
||||||
else:
|
|
||||||
max_level = self.parse_max_level(max_level_str)
|
|
||||||
|
|
||||||
print('')
|
|
||||||
|
|
||||||
return max_level
|
|
||||||
|
|
||||||
def parse_report_number(self, s, show_help=False):
|
|
||||||
try:
|
|
||||||
n = int(s)
|
|
||||||
except ValueError:
|
|
||||||
show_error_and_exit(
|
|
||||||
'ERROR: integer report number expected',
|
|
||||||
show_help)
|
|
||||||
|
|
||||||
if n >= len(self) or n < 0:
|
|
||||||
show_error_and_exit('ERROR: invalid report number.', show_help)
|
|
||||||
|
|
||||||
return n
|
|
||||||
|
|
||||||
def parse_max_level(self, s, show_help=False):
|
|
||||||
if s == 'max':
|
|
||||||
return sys.maxsize
|
|
||||||
|
|
||||||
try:
|
|
||||||
n = int(s)
|
|
||||||
except ValueError:
|
|
||||||
show_error_and_exit(
|
|
||||||
'ERROR: integer max level or "max" expected',
|
|
||||||
show_help)
|
|
||||||
|
|
||||||
if n < 0:
|
|
||||||
show_error_and_exit('ERROR: invalid max level.', show_help)
|
|
||||||
|
|
||||||
return n
|
|
||||||
|
|
||||||
def __len__(self):
|
|
||||||
return len(self.reports)
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
return self.reports.__iter__()
|
|
||||||
|
|
||||||
def __next__(self):
|
|
||||||
return self.reports.__next__()
|
|
||||||
|
|
||||||
|
|
||||||
def path_of_bug_number(traces_dir, i):
|
|
||||||
return os.path.join(traces_dir, 'bug_%d.txt' % (i+1))
|
|
||||||
|
|
||||||
|
|
||||||
def url_of_bug_number(i):
|
|
||||||
return '%s/bug_%d.txt' % (TRACES_REPORT_DIR, i+1)
|
|
||||||
|
|
||||||
|
|
||||||
def get_remote_source_template():
|
|
||||||
"""Return a template that given 'file-name' and 'line-number' entries
|
|
||||||
gives a remote url to that source location. Return the empty
|
|
||||||
template if no remote source has been detected. Currently only
|
|
||||||
detects GitHub projects.
|
|
||||||
"""
|
|
||||||
# see if we are in a GitHub project clone
|
|
||||||
try:
|
|
||||||
git_remote = subprocess.check_output(
|
|
||||||
['git',
|
|
||||||
'config',
|
|
||||||
'--get',
|
|
||||||
'remote.origin.url']).decode().strip()
|
|
||||||
m = SOURCE_REMOTE_GITHUB_RE.match(git_remote)
|
|
||||||
if m is not None:
|
|
||||||
project = m.group('project')
|
|
||||||
# some remotes end in .git, but the http urls don't have
|
|
||||||
# these
|
|
||||||
if project.endswith('.git'):
|
|
||||||
project = project[:-len('.git')]
|
|
||||||
print('Detected GitHub project %s' % project)
|
|
||||||
hash = subprocess.check_output(
|
|
||||||
['git',
|
|
||||||
'rev-parse',
|
|
||||||
'HEAD']).decode().strip()
|
|
||||||
root = subprocess.check_output(
|
|
||||||
['git',
|
|
||||||
'rev-parse',
|
|
||||||
'--show-toplevel']).decode().strip()
|
|
||||||
# FIXME(t8921813): we should have a way to get absolute
|
|
||||||
# paths in traces. In the meantime, trust that we run from
|
|
||||||
# the same directory from which infer was run.
|
|
||||||
relative_path = os.path.relpath(os.getcwd(), root)
|
|
||||||
d = {
|
|
||||||
'project': project,
|
|
||||||
'hash': hash,
|
|
||||||
'relative-path': relative_path,
|
|
||||||
'file-name': '{file-name}',
|
|
||||||
'line-number': '{line-number}',
|
|
||||||
}
|
|
||||||
return SOURCE_REMOTE_GITHUB_URL_TEMPLATE.format(**d)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def html_bug_trace(args, report, bug_id):
|
|
||||||
bug_trace = ''
|
|
||||||
bug_trace += '%s\n' % describe_report(report)
|
|
||||||
tracer = Tracer(args)
|
|
||||||
tracer.build_report(report)
|
|
||||||
bug_trace += str(tracer)
|
|
||||||
return bug_trace
|
|
||||||
|
|
||||||
|
|
||||||
def html_list_of_bugs(args, remote_source_template, selector):
|
|
||||||
template = '\n'.join([
|
|
||||||
'<html>',
|
|
||||||
'<head>',
|
|
||||||
'<title>Infer found {num-bugs} bugs</title>',
|
|
||||||
'</head>',
|
|
||||||
'<body>',
|
|
||||||
'<h2>List of bugs found</h2>',
|
|
||||||
'{list-of-bugs}',
|
|
||||||
'</body>',
|
|
||||||
'</html>',
|
|
||||||
])
|
|
||||||
|
|
||||||
report_template = '\n'.join([
|
|
||||||
'<li>',
|
|
||||||
'{description}',
|
|
||||||
'({source-uri}<a href="{trace-url}">trace</a>)',
|
|
||||||
'</li>',
|
|
||||||
])
|
|
||||||
|
|
||||||
def source_uri(report):
|
|
||||||
d = {
|
|
||||||
'file-name': report['file'],
|
|
||||||
'line-number': report['line'],
|
|
||||||
}
|
|
||||||
if remote_source_template is not None:
|
|
||||||
link = remote_source_template.format(**d)
|
|
||||||
return '<a href="%s">source</a> | ' % link
|
|
||||||
return ''
|
|
||||||
|
|
||||||
i = 0
|
|
||||||
list_of_bugs = '<ol>'
|
|
||||||
for report in selector:
|
|
||||||
d = {
|
|
||||||
'description': describe_report(report, 2),
|
|
||||||
'trace-url': url_of_bug_number(i),
|
|
||||||
'source-uri': source_uri(report),
|
|
||||||
}
|
|
||||||
list_of_bugs += report_template.format(**d)
|
|
||||||
i += 1
|
|
||||||
list_of_bugs += '</ol>'
|
|
||||||
|
|
||||||
d = {
|
|
||||||
'num-bugs': len(selector),
|
|
||||||
'list-of-bugs': list_of_bugs,
|
|
||||||
}
|
|
||||||
return template.format(**d)
|
|
||||||
|
|
||||||
|
|
||||||
def generate_html_report(args, reports):
|
|
||||||
html_dir = os.path.join(args.infer_out, HTML_REPORT_DIR)
|
|
||||||
shutil.rmtree(html_dir, True)
|
|
||||||
inferlib.mkdir_if_not_exists(html_dir)
|
|
||||||
|
|
||||||
traces_dir = os.path.join(html_dir, TRACES_REPORT_DIR)
|
|
||||||
inferlib.mkdir_if_not_exists(traces_dir)
|
|
||||||
|
|
||||||
sel = Selector(args, reports)
|
|
||||||
|
|
||||||
i = 0
|
|
||||||
for bug in sel:
|
|
||||||
bug_trace_path = path_of_bug_number(traces_dir, i)
|
|
||||||
with open(bug_trace_path, 'w') as bug_trace_file:
|
|
||||||
bug_trace_file.write(html_bug_trace(args, bug, i))
|
|
||||||
i += 1
|
|
||||||
|
|
||||||
remote_source_template = get_remote_source_template()
|
|
||||||
bug_list_path = os.path.join(html_dir, 'index.html')
|
|
||||||
with open(bug_list_path, 'w') as bug_list_file:
|
|
||||||
bug_list_file.write(html_list_of_bugs(args,
|
|
||||||
remote_source_template,
|
|
||||||
sel))
|
|
||||||
|
|
||||||
print('Saved html report in:\n%s' % bug_list_path)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
args = base_parser.parse_args()
|
|
||||||
|
|
||||||
report_filename = os.path.join(args.infer_out, utils.JSON_REPORT_FILENAME)
|
|
||||||
with open(report_filename) as report_file:
|
|
||||||
reports = json.load(report_file)
|
|
||||||
|
|
||||||
if args.html:
|
|
||||||
generate_html_report(args, reports)
|
|
||||||
exit(0)
|
|
||||||
|
|
||||||
sel = Selector(args, reports)
|
|
||||||
|
|
||||||
if len(sel) == 0:
|
|
||||||
print('No issues found')
|
|
||||||
exit(0)
|
|
||||||
|
|
||||||
if args.only_show:
|
|
||||||
sel.show_choices()
|
|
||||||
exit(0)
|
|
||||||
|
|
||||||
report = sel.prompt_report()
|
|
||||||
max_level = sel.prompt_level()
|
|
||||||
|
|
||||||
print(describe_report(report))
|
|
||||||
|
|
||||||
tracer = Tracer(args, max_level)
|
|
||||||
tracer.build_report(report)
|
|
||||||
print(tracer)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
@@ -0,0 +1,541 @@
#!/usr/bin/env python2.7
# Copyright (c) 2013 - present Facebook, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
# of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import print_function
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import csv
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import multiprocessing
|
||||||
|
import os
|
||||||
|
import platform
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import stat
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import time
|
||||||
|
import traceback
|
||||||
|
import zipfile
|
||||||
|
|
||||||
|
from inferlib import infer, utils
|
||||||
|
|
||||||
|
|
||||||
|
ANALYSIS_SUMMARY_OUTPUT = 'analysis_summary.txt'
|
||||||
|
|
||||||
|
BUCK_CONFIG = '.buckconfig.local'
|
||||||
|
BUCK_CONFIG_BACKUP = '.buckconfig.local.backup_generated_by_infer'
|
||||||
|
DEFAULT_BUCK_OUT = os.path.join(os.getcwd(), 'buck-out')
|
||||||
|
DEFAULT_BUCK_OUT_GEN = os.path.join(DEFAULT_BUCK_OUT, 'gen')
|
||||||
|
|
||||||
|
INFER_REPORT = os.path.join(utils.BUCK_INFER_OUT, utils.CSV_REPORT_FILENAME)
|
||||||
|
INFER_STATS = os.path.join(utils.BUCK_INFER_OUT, utils.STATS_FILENAME)
|
||||||
|
|
||||||
|
INFER_SCRIPT = """\
|
||||||
|
#!/usr/bin/env {0}
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
cmd = ['{0}'] + {1} + ['--', 'javac'] + sys.argv[1:]
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
"""
|
||||||
|
|
||||||
|
LOCAL_CONFIG = """\
|
||||||
|
[tools]
|
||||||
|
javac = %s
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_build(args):
|
||||||
|
"""Creates script that redirects javac calls to infer and a local buck
|
||||||
|
configuration that tells buck to use that script.
|
||||||
|
"""
|
||||||
|
|
||||||
|
infer_options = [
|
||||||
|
'--buck',
|
||||||
|
'--incremental',
|
||||||
|
'--analyzer', args.analyzer,
|
||||||
|
]
|
||||||
|
|
||||||
|
if args.debug:
|
||||||
|
infer_options.append('--debug')
|
||||||
|
|
||||||
|
if args.no_filtering:
|
||||||
|
infer_options.append('--no-filtering')
|
||||||
|
|
||||||
|
if args.infer_out is not None:
|
||||||
|
infer_options += ['--out', args.infer_out]
|
||||||
|
|
||||||
|
# Create a temporary directory as a cache for jar files.
|
||||||
|
infer_cache_dir = os.path.join(args.infer_out, 'cache')
|
||||||
|
if not os.path.isdir(infer_cache_dir):
|
||||||
|
os.mkdir(infer_cache_dir)
|
||||||
|
infer_options.append('--infer_cache')
|
||||||
|
infer_options.append(infer_cache_dir)
|
||||||
|
temp_files = [infer_cache_dir]
|
||||||
|
|
||||||
|
try:
|
||||||
|
infer = [utils.get_cmd_in_bin_dir('infer')] + infer_options
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
logging.error('Could not find infer')
|
||||||
|
raise e
|
||||||
|
|
||||||
|
# Disable the use of buckd as this script modifies .buckconfig.local
|
||||||
|
logging.info('Disabling buckd: export NO_BUCKD=1')
|
||||||
|
os.environ['NO_BUCKD'] = '1'
|
||||||
|
|
||||||
|
# make sure INFER_ANALYSIS is set when buck is called
|
||||||
|
logging.info('Setup Infer analysis mode for Buck: export INFER_ANALYSIS=1')
|
||||||
|
os.environ['INFER_ANALYSIS'] = '1'
|
||||||
|
|
||||||
|
# Create a script to be called by buck
|
||||||
|
infer_script = None
|
||||||
|
with tempfile.NamedTemporaryFile(delete=False,
|
||||||
|
prefix='infer_',
|
||||||
|
suffix='.py',
|
||||||
|
dir='.') as infer_script:
|
||||||
|
logging.info('Creating %s' % infer_script.name)
|
||||||
|
infer_script.file.write(
|
||||||
|
(INFER_SCRIPT.format(sys.executable, infer)).encode())
|
||||||
|
|
||||||
|
st = os.stat(infer_script.name)
|
||||||
|
os.chmod(infer_script.name, st.st_mode | stat.S_IEXEC)
|
||||||
|
|
||||||
|
# Backup and patch local buck config
|
||||||
|
patched_config = ''
|
||||||
|
if os.path.isfile(BUCK_CONFIG):
|
||||||
|
logging.info('Backing up %s to %s', BUCK_CONFIG, BUCK_CONFIG_BACKUP)
|
||||||
|
shutil.move(BUCK_CONFIG, BUCK_CONFIG_BACKUP)
|
||||||
|
with open(BUCK_CONFIG_BACKUP) as buckconfig:
|
||||||
|
patched_config = '\n'.join(buckconfig)
|
||||||
|
|
||||||
|
javac_section = '[tools]\n{0}javac = {1}'.format(
|
||||||
|
' ' * 4,
|
||||||
|
infer_script.name)
|
||||||
|
patched_config += javac_section
|
||||||
|
with open(BUCK_CONFIG, 'w') as buckconfig:
|
||||||
|
buckconfig.write(patched_config)
|
||||||
|
|
||||||
|
temp_files += [infer_script.name]
|
||||||
|
return temp_files
|
||||||
|
|
||||||
|
|
||||||
|
def java_targets():
|
||||||
|
target_types = [
|
||||||
|
'android_library',
|
||||||
|
'java_library',
|
||||||
|
]
|
||||||
|
try:
|
||||||
|
targets = subprocess.check_output([
|
||||||
|
'buck',
|
||||||
|
'targets',
|
||||||
|
'--type',
|
||||||
|
] + target_types).decode().strip().split('\n')
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
logging.error('Could not compute java library targets')
|
||||||
|
raise e
|
||||||
|
return set(targets)
|
||||||
|
|
||||||
|
|
||||||
|
def is_alias(target):
|
||||||
|
return ':' not in target
|
||||||
|
|
||||||
|
|
||||||
|
def expand_target(target, java_targets):
|
||||||
|
if not is_alias(target):
|
||||||
|
return [target]
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
buck_audit_cmd = ['buck', 'audit', 'classpath', '--dot', target]
|
||||||
|
output = subprocess.check_output(buck_audit_cmd)
|
||||||
|
dotty = output.decode().split('\n')
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
logging.error('Could not expand target {0}'.format(target))
|
||||||
|
raise e
|
||||||
|
targets = set()
|
||||||
|
edge_re = re.compile('.*"(.*)".*"(.*)".*')
|
||||||
|
for line in dotty:
|
||||||
|
match = re.match(edge_re, line)
|
||||||
|
if match:
|
||||||
|
for t in match.groups():
|
||||||
|
if t in java_targets:
|
||||||
|
targets.add(t)
|
||||||
|
return targets
|
||||||
|
|
||||||
|
|
||||||
|
def normalize_target(target):
|
||||||
|
if is_alias(target) or target.startswith('//'):
|
||||||
|
return target
|
||||||
|
else:
|
||||||
|
return '//' + target
|
||||||
|
|
||||||
|
|
||||||
|
def determine_library_targets(args):
|
||||||
|
""" Uses git and buck audit to expand aliases into the list of java or
|
||||||
|
android library targets that are parts of these aliases.
|
||||||
|
Buck targets directly passed as argument are not expanded """
|
||||||
|
|
||||||
|
args.targets = [normalize_target(t) for t in args.targets]
|
||||||
|
|
||||||
|
if any(map(is_alias, args.targets)):
|
||||||
|
all_java_targets = java_targets()
|
||||||
|
targets = set()
|
||||||
|
for t in args.targets:
|
||||||
|
targets.update(expand_target(t, all_java_targets))
|
||||||
|
args.targets = list(targets)
|
||||||
|
|
||||||
|
if args.verbose:
|
||||||
|
logging.debug('Targets to analyze:')
|
||||||
|
for target in args.targets:
|
||||||
|
logging.debug(target)
|
||||||
|
|
||||||
|
|
||||||
|
def init_stats(args, start_time):
|
||||||
|
"""Returns dictionary with target independent statistics.
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
'float': {},
|
||||||
|
'int': {
|
||||||
|
'cores': multiprocessing.cpu_count(),
|
||||||
|
'time': int(time.time()),
|
||||||
|
'start_time': int(round(start_time)),
|
||||||
|
},
|
||||||
|
'normal': {
|
||||||
|
'debug': str(args.debug),
|
||||||
|
'analyzer': args.analyzer,
|
||||||
|
'machine': platform.machine(),
|
||||||
|
'node': platform.node(),
|
||||||
|
'project': os.path.basename(os.getcwd()),
|
||||||
|
'revision': utils.vcs_revision(),
|
||||||
|
'branch': utils.vcs_branch(),
|
||||||
|
'system': platform.system(),
|
||||||
|
'infer_version': utils.infer_version(),
|
||||||
|
'infer_branch': utils.infer_branch(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def store_performances_csv(infer_out, stats):
|
||||||
|
"""Stores the statistics about perfromances into a CSV file to be exported
|
||||||
|
to a database"""
|
||||||
|
perf_filename = os.path.join(infer_out, utils.CSV_PERF_FILENAME)
|
||||||
|
with open(perf_filename, 'w') as csv_file_out:
|
||||||
|
csv_writer = csv.writer(csv_file_out)
|
||||||
|
keys = ['infer_version', 'project', 'revision', 'files', 'lines',
|
||||||
|
'cores', 'system', 'machine', 'node', 'total_time',
|
||||||
|
'capture_time', 'analysis_time', 'reporting_time', 'time']
|
||||||
|
int_stats = list(stats['int'].items())
|
||||||
|
normal_stats = list(stats['normal'].items())
|
||||||
|
flat_stats = dict(int_stats + normal_stats)
|
||||||
|
values = []
|
||||||
|
for key in keys:
|
||||||
|
values.append(flat_stats[key])
|
||||||
|
csv_writer.writerow(keys)
|
||||||
|
csv_writer.writerow(values)
|
||||||
|
csv_file_out.flush()
|
||||||
|
|
||||||
|
|
||||||
|
def get_harness_code():
|
||||||
|
all_harness_code = '\nGenerated harness code:\n'
|
||||||
|
for filename in os.listdir(DEFAULT_BUCK_OUT_GEN):
|
||||||
|
if 'InferGeneratedHarness' in filename:
|
||||||
|
all_harness_code += '\n' + filename + ':\n'
|
||||||
|
with open(os.path.join(DEFAULT_BUCK_OUT_GEN,
|
||||||
|
filename), 'r') as file_in:
|
||||||
|
all_harness_code += file_in.read()
|
||||||
|
return all_harness_code + '\n'
|
||||||
|
|
||||||
|
|
||||||
|
def get_basic_stats(stats):
|
||||||
|
files_analyzed = '{0} files ({1} lines) analyzed in {2}s\n\n'.format(
|
||||||
|
stats['int']['files'],
|
||||||
|
stats['int']['lines'],
|
||||||
|
stats['int']['total_time'],
|
||||||
|
)
|
||||||
|
phase_times = 'Capture time: {0}s\nAnalysis time: {1}s\n\n'.format(
|
||||||
|
stats['int']['capture_time'],
|
||||||
|
stats['int']['analysis_time'],
|
||||||
|
)
|
||||||
|
|
||||||
|
to_skip = {
|
||||||
|
'files',
|
||||||
|
'procedures',
|
||||||
|
'lines',
|
||||||
|
'cores',
|
||||||
|
'time',
|
||||||
|
'start_time',
|
||||||
|
'capture_time',
|
||||||
|
'analysis_time',
|
||||||
|
'reporting_time',
|
||||||
|
'total_time',
|
||||||
|
'makefile_generation_time'
|
||||||
|
}
|
||||||
|
bugs_found = 'Errors found:\n\n'
|
||||||
|
for key, value in sorted(stats['int'].items()):
|
||||||
|
if key not in to_skip:
|
||||||
|
bugs_found += ' {0:>8} {1}\n'.format(value, key)
|
||||||
|
|
||||||
|
basic_stats_message = files_analyzed + phase_times + bugs_found + '\n'
|
||||||
|
return basic_stats_message
|
||||||
|
|
||||||
|
|
||||||
|
def get_buck_stats():
|
||||||
|
trace_filename = os.path.join(
|
||||||
|
DEFAULT_BUCK_OUT,
|
||||||
|
'log',
|
||||||
|
'traces',
|
||||||
|
'build.trace'
|
||||||
|
)
|
||||||
|
ARGS = 'args'
|
||||||
|
SUCCESS_STATUS = 'success_type'
|
||||||
|
buck_stats = {}
|
||||||
|
try:
|
||||||
|
with open(trace_filename, 'r') as file_in:
|
||||||
|
trace = json.load(file_in)
|
||||||
|
for t in trace:
|
||||||
|
if SUCCESS_STATUS in t[ARGS]:
|
||||||
|
status = t[ARGS][SUCCESS_STATUS]
|
||||||
|
count = buck_stats.get(status, 0)
|
||||||
|
buck_stats[status] = count + 1
|
||||||
|
|
||||||
|
buck_stats_message = 'Buck build statistics:\n\n'
|
||||||
|
for key, value in sorted(buck_stats.items()):
|
||||||
|
buck_stats_message += ' {0:>8} {1}\n'.format(value, key)
|
||||||
|
|
||||||
|
return buck_stats_message
|
||||||
|
except IOError as e:
|
||||||
|
logging.error('Caught %s: %s' % (e.__class__.__name__, str(e)))
|
||||||
|
logging.error(traceback.format_exc())
|
||||||
|
return ''
|
||||||
|
|
||||||
|
|
||||||
|
class NotFoundInJar(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def load_stats(opened_jar):
|
||||||
|
try:
|
||||||
|
return json.loads(opened_jar.read(INFER_STATS).decode())
|
||||||
|
except KeyError as e:
|
||||||
|
raise NotFoundInJar
|
||||||
|
|
||||||
|
|
||||||
|
def load_report(opened_jar):
|
||||||
|
try:
|
||||||
|
sio = io.StringIO(opened_jar.read(INFER_REPORT).decode())
|
||||||
|
return list(csv.reader(sio))
|
||||||
|
except KeyError as e:
|
||||||
|
raise NotFoundInJar
|
||||||
|
|
||||||
|
|
||||||
|
def rows_remove_duplicates(rows):
|
||||||
|
seen = {}
|
||||||
|
result = []
|
||||||
|
for row in rows:
|
||||||
|
t = tuple(row)
|
||||||
|
if t in seen:
|
||||||
|
continue
|
||||||
|
seen[t] = 1
|
||||||
|
result.append(row)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def collect_results(args, start_time):
|
||||||
|
"""Walks through buck-gen, collects results for the different buck targets
|
||||||
|
and stores them in in args.infer_out/results.csv.
|
||||||
|
"""
|
||||||
|
buck_stats = get_buck_stats()
|
||||||
|
logging.info(buck_stats)
|
||||||
|
with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
|
||||||
|
f.write(buck_stats)
|
||||||
|
|
||||||
|
all_rows = []
|
||||||
|
headers = []
|
||||||
|
stats = init_stats(args, start_time)
|
||||||
|
|
||||||
|
accumulation_whitelist = list(map(re.compile, [
|
||||||
|
'^cores$',
|
||||||
|
'^time$',
|
||||||
|
'^start_time$',
|
||||||
|
'.*_pc',
|
||||||
|
]))
|
||||||
|
|
||||||
|
expected_analyzer = stats['normal']['analyzer']
|
||||||
|
expected_version = stats['normal']['infer_version']
|
||||||
|
|
||||||
|
for root, _, files in os.walk(DEFAULT_BUCK_OUT_GEN):
|
||||||
|
for f in [f for f in files if f.endswith('.jar')]:
|
||||||
|
path = os.path.join(root, f)
|
||||||
|
try:
|
||||||
|
with zipfile.ZipFile(path) as jar:
|
||||||
|
# Accumulate integers and float values
|
||||||
|
target_stats = load_stats(jar)
|
||||||
|
|
||||||
|
found_analyzer = target_stats['normal']['analyzer']
|
||||||
|
found_version = target_stats['normal']['infer_version']
|
||||||
|
|
||||||
|
if (found_analyzer != expected_analyzer
|
||||||
|
or found_version != expected_version):
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
for type_k in ['int', 'float']:
|
||||||
|
items = target_stats.get(type_k, {}).items()
|
||||||
|
for key, value in items:
|
||||||
|
if not any(map(lambda r: r.match(key),
|
||||||
|
accumulation_whitelist)):
|
||||||
|
old_value = stats[type_k].get(key, 0)
|
||||||
|
stats[type_k][key] = old_value + value
|
||||||
|
|
||||||
|
rows = load_report(jar)
|
||||||
|
if len(rows) > 0:
|
||||||
|
headers.append(rows[0])
|
||||||
|
all_rows.extend(rows[1:])
|
||||||
|
|
||||||
|
# Override normals
|
||||||
|
stats['normal'].update(target_stats.get('normal', {}))
|
||||||
|
except NotFoundInJar:
|
||||||
|
pass
|
||||||
|
except zipfile.BadZipfile:
|
||||||
|
logging.warn('Bad zip file %s', path)
|
||||||
|
|
||||||
|
csv_report = os.path.join(args.infer_out, utils.CSV_REPORT_FILENAME)
|
||||||
|
bugs_out = os.path.join(args.infer_out, utils.BUGS_FILENAME)
|
||||||
|
|
||||||
|
if len(headers) == 0:
|
||||||
|
with open(csv_report, 'w'):
|
||||||
|
pass
|
||||||
|
logging.info('No reports found')
|
||||||
|
return
|
||||||
|
elif len(headers) > 1:
|
||||||
|
if any(map(lambda x: x != headers[0], headers)):
|
||||||
|
raise Exception('Inconsistent reports found')
|
||||||
|
|
||||||
|
# Convert all float values to integer values
|
||||||
|
for key, value in stats.get('float', {}).items():
|
||||||
|
stats['int'][key] = int(round(value))
|
||||||
|
|
||||||
|
# Delete the float entries before exporting the results
|
||||||
|
del(stats['float'])
|
||||||
|
|
||||||
|
    with open(csv_report, 'w') as report:
        writer = csv.writer(report)
        writer.writerows([headers[0]] + rows_remove_duplicates(all_rows))
        report.flush()

    # export the CSV rows to JSON
    utils.create_json_report(args.infer_out)

    print('\n')
    infer.print_errors(csv_report, bugs_out)

    stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))

    store_performances_csv(args.infer_out, stats)

    stats_filename = os.path.join(args.infer_out, utils.STATS_FILENAME)
    with open(stats_filename, 'w') as stats_out:
        json.dump(stats, stats_out, indent=2)

    basic_stats = get_basic_stats(stats)

    if args.print_harness:
        harness_code = get_harness_code()
        basic_stats += harness_code

    logging.info(basic_stats)

    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'a') as f:
        f.write(basic_stats)


def cleanup(temp_files):
    """Removes the generated .buckconfig.local and the temporary infer script.
    """
    for file in [BUCK_CONFIG] + temp_files:
        try:
            logging.info('Removing %s' % file)
            if os.path.isdir(file):
                shutil.rmtree(file)
            else:
                os.unlink(file)
        except (IOError, OSError):
            # os.unlink and shutil.rmtree raise OSError rather than IOError,
            # so catch both to keep cleanup from crashing in the finally block
            logging.error('Could not remove %s' % file)

    if os.path.isfile(BUCK_CONFIG_BACKUP):
        logging.info('Restoring %s', BUCK_CONFIG)
        shutil.move(BUCK_CONFIG_BACKUP, BUCK_CONFIG)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(parents=[infer.base_parser])
    parser.add_argument('--verbose', action='store_true',
                        help='Print buck compilation steps')
    parser.add_argument('--no-cache', action='store_true',
                        help='Do not use buck distributed cache')
    parser.add_argument('--print-harness', action='store_true',
                        help='Print generated harness code (Android only)')
    parser.add_argument('targets', nargs='*', metavar='target',
                        help='Build targets to analyze')
    args = parser.parse_args()
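    # Example (illustrative, hypothetical target): passing
    #   --no-cache //java/sample:app
    # analyzes //java/sample:app with the buck distributed cache disabled.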

    utils.configure_logging(args.verbose)
    timer = utils.Timer(logging.info)
    temp_files = []

    try:
        start_time = time.time()
        logging.info('Starting the analysis')
        subprocess.check_call(
            [utils.get_cmd_in_bin_dir('InferAnalyze'), '-version'])

        if not os.path.isdir(args.infer_out):
            os.mkdir(args.infer_out)

        timer.start('Preparing build...')
        temp_files += prepare_build(args)
        timer.stop('Build prepared')

        # TODO(t3786463) Start buckd.

        timer.start('Computing library targets')
        determine_library_targets(args)
        timer.stop('%d targets computed', len(args.targets))

        timer.start('Running buck...')
        buck_cmd = ['buck', 'build']
        if args.no_cache:
            buck_cmd += ['--no-cache']
        if args.verbose:
            buck_cmd += ['-v', '2']
        subprocess.check_call(buck_cmd + args.targets)
        timer.stop('Buck finished')

        timer.start('Collecting results...')
        collect_results(args, start_time)
        timer.stop('Done')

    except KeyboardInterrupt as e:
        timer.stop('Exiting')
        sys.exit(0)
    except Exception as e:
        timer.stop('Failed')
        logging.error('Caught %s: %s' % (e.__class__.__name__, str(e)))
        logging.error(traceback.format_exc())
        sys.exit(1)
    finally:
        cleanup(temp_files)


# vim: set sw=4 ts=4 et:
@ -0,0 +1,179 @@
#!/usr/bin/env python2.7

import argparse
import imp
import json
import logging
import os
import platform
import sys

import inferlib
from inferlib import infer, utils

CAPTURE_PACKAGE = 'capture'

# token that identifies the end of the options for infer and the beginning
# of the compilation command
CMD_MARKER = '--'

# Map from capture module name to the list of compiler/build-system commands
# it handles. All supported commands should be listed here.
MODULE_TO_COMMAND = {
    'ant': ['ant'],
    'analyze': ['analyze'],
    'buck': ['buck'],
    'gradle': ['gradle', 'gradlew'],
    'javac': ['javac'],
    'make': ['make', 'clang', 'clang++', 'cc', 'gcc', 'g++'],
    'xcodebuild': ['xcodebuild'],
    'mvn': ['mvn']
}
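# For example, with the mapping above a command line ending in
# `-- gradlew build` is dispatched to the 'gradle' capture module, while a
# build tool that is not listed (say, ninja) falls through to the
# "command not recognised" branch in main() below.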

FORMAT = '[%(levelname)s] %(message)s'
LOG_FILE = 'toplevel.log'


def get_commands():
    """Return all commands that are supported."""
    # flatten and dedup the list of commands
    return set(sum(MODULE_TO_COMMAND.values(), []))


def get_module_name(command):
    """Return the module that is able to handle the command, or None if
    there is no such module."""
    for module, commands in MODULE_TO_COMMAND.iteritems():
        if command in commands:
            return module
    return None


def load_module(mod_name):
    pkg_info = imp.find_module(CAPTURE_PACKAGE, inferlib.__path__)
    imported_pkg = imp.load_module(CAPTURE_PACKAGE, *pkg_info)
    # load the requested module (e.g. make)
    mod_file, mod_path, mod_descr = \
        imp.find_module(mod_name, imported_pkg.__path__)
    try:
        return imp.load_module(
            '{pkg}.{mod}'.format(pkg=imported_pkg.__name__, mod=mod_name),
            mod_file, mod_path, mod_descr)
    finally:
        if mod_file:
            mod_file.close()


def split_args_to_parse():
    dd_index = \
        sys.argv.index(CMD_MARKER) if CMD_MARKER in sys.argv else len(sys.argv)
    return sys.argv[1:dd_index], sys.argv[dd_index + 1:]
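# Example (illustrative): for `infer --no-filtering -- javac Hello.java`,
# split_args_to_parse() returns (['--no-filtering'], ['javac', 'Hello.java']):
# everything before the first '--' goes to argparse, the rest is the build
# command.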


def create_argparser(parents=[]):
    parser = argparse.ArgumentParser(
        parents=[infer.infer_parser] + parents,
        add_help=False,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    group = parser.add_argument_group(
        'supported compiler/build-system commands')

    supported_commands = ', '.join(get_commands())
    group.add_argument(
        CMD_MARKER,
        metavar='<cmd>',
        dest='nullarg',
        default=None,
        help=('Command to run the compiler/build-system. '
              'Supported build commands (run `infer --help -- <cmd_name>` for '
              'extra help, e.g. `infer --help -- javac`): ' + supported_commands),
    )
    return parser
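# Note: the '--' pseudo-argument registered above never actually receives a
# value; split_args_to_parse() removes the marker and everything after it
# before these options are parsed, so (it appears) the argument exists only so
# the list of supported build commands shows up in `infer --help`.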


def configure_logging(infer_dir, log_to_stderr):
    if log_to_stderr:
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    else:
        logging.basicConfig(level=logging.INFO,
                            format=FORMAT,
                            filename=os.path.join(infer_dir, LOG_FILE),
                            filemode='w')
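# With the default options the log therefore ends up in
# <results directory>/toplevel.log; when args.log_to_stderr is set (an option
# defined by the parent parser), records are printed to stderr instead.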


def main():
    to_parse, cmd = split_args_to_parse()
    # get the module name (if any), then load it
    capture_module_name = os.path.basename(cmd[0]) if len(cmd) > 0 else None
    mod_name = get_module_name(capture_module_name)
    imported_module = None
    if mod_name:
        # there is a module that supports the command
        imported_module = load_module(mod_name)

    # get the module's argparser and merge it with the global argparser
    module_argparser = []
    if imported_module:
        module_argparser.append(
            imported_module.create_argparser(capture_module_name)
        )
    global_argparser = create_argparser(module_argparser)

    args = global_argparser.parse_args(to_parse)

    if (imported_module and not args.incremental and
            capture_module_name != 'analyze'):
        infer.remove_infer_out(args.infer_out)

    infer.create_results_dir(args.infer_out)

    configure_logging(args.infer_out, args.log_to_stderr)
    logging.info('Running command %s', ' '.join(sys.argv))
    logging.info('Path to infer script %s (%s)', __file__,
                 os.path.realpath(__file__))
    logging.info(infer.get_infer_version())
    logging.info('Platform: %s', platform.platform())
    logging.info('PATH=%s', os.getenv('PATH'))
    logging.info('SHELL=%s', os.getenv('SHELL'))
    logging.info('PWD=%s', os.getenv('PWD'))

    if imported_module:
        capture_exitcode = imported_module.gen_instance(args, cmd).capture()
        if capture_exitcode != os.EX_OK:
            logging.error('Error during capture phase, exiting')
            exit(capture_exitcode)
        logging.info('Capture phase was successful')
    elif capture_module_name is not None:
        # There was a command, but it's not supported
        print('Command "{cmd}" not recognised'.format(
            cmd=capture_module_name))
        global_argparser.print_help()
        sys.exit(1)
    else:
        global_argparser.print_help()
        sys.exit(os.EX_OK)

    if not (mod_name == 'buck' or mod_name == 'javac'):
        # Something should already have been captured, otherwise the analysis
        # would fail
        if not os.path.exists(os.path.join(args.infer_out, 'captured')):
            print('There was nothing to analyze, exiting')
            exit(os.EX_USAGE)
        analysis = infer.Infer(args, [])
        analysis.analyze_and_report()
        analysis.save_stats()

    if args.fail_on_bug:
        bugs_filename = os.path.join(args.infer_out,
                                     utils.JSON_REPORT_FILENAME)
        try:
            with open(bugs_filename) as bugs_file:
                bugs = json.load(bugs_file)
                if len(bugs) > 0:
                    sys.exit(infer.BUG_FOUND_ERROR_CODE)
        except (IOError, OSError):
            # in Python 2 a missing report file raises IOError, not OSError,
            # so catch both and treat "no report" as "no bugs"
            pass


if __name__ == '__main__':
    main()
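# Example (illustrative): in a CI job one could run
#   infer --fail-on-bug -- javac Hello.java
# (assuming the option behind args.fail_on_bug is spelled --fail-on-bug in the
# parent parser) so that the wrapper exits with infer.BUG_FOUND_ERROR_CODE
# whenever the JSON report contains at least one issue.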
@ -0,0 +1,397 @@
#!/usr/bin/env python2.7

# Copyright (c) 2013 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import json
import os
import re
import shutil
import subprocess
import sys

from inferlib import infer, utils

HTML_REPORT_DIR = 'report.html'
TRACES_REPORT_DIR = 'traces'
SOURCE_REMOTE_GITHUB_URL_TEMPLATE = ('https://github.com/{project}/blob/'
                                     '{hash}/{relative-path}/'
                                     '{file-name}#L{line-number}')
SOURCE_REMOTE_GITHUB_RE = re.compile('.*github.com[:/](?P<project>.*)')
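# Filled with hypothetical values, the template above expands to e.g.
# https://github.com/facebook/example/blob/abc123/src/Foo.java#L42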


base_parser = argparse.ArgumentParser(
    description='Explore the error traces in Infer reports.')
base_parser.add_argument('-o', '--out', metavar='<directory>',
                         default=utils.DEFAULT_INFER_OUT, dest='infer_out',
                         action=utils.AbsolutePathAction,
                         help='Set the Infer results directory')
base_parser.add_argument('--only-show',
                         action='store_true',
                         help='Show the list of reports and exit')
base_parser.add_argument('--no-source',
                         action='store_true',
                         help='Do not print code excerpts')
base_parser.add_argument('--select',
                         metavar='N',
                         nargs=1,
                         help='Select bug number N. '
                              'If omitted, prompts you for input.')
base_parser.add_argument('--max-level',
                         metavar='N',
                         nargs=1,
                         help='Level of nested procedure calls to show. '
                              'Can be "max", in which case all levels are '
                              'shown. If omitted, prompts you for input.')
base_parser.add_argument('--html',
                         action='store_true',
                         help='Generate HTML report.')
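# Example (illustrative): invoking this script with
#   --select 0 --max-level max --no-source
# prints the full trace of the first report without source code excerpts,
# while --only-show just lists the reports and exits.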


def describe_report(report, indent=0):
    filename = report['file']
    kind = report['kind']
    line = report['line']
    error_type = report['type']
    msg = report['qualifier']
    return '{0}:{1}: {2}: {3}\n {4}{5}\n'.format(
        filename,
        line,
        kind.lower(),
        error_type,
        ' ' * indent,
        msg,
    )
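# The result looks roughly like this (file name and message are invented for
# illustration):
#   Example.java:42: error: NULL_DEREFERENCE
#    object o could be null and is dereferenced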


def show_error_and_exit(err, show_help):
    print(err)
    if show_help:
        print('')
        base_parser.print_help()
    exit(1)


class Tracer(object):
    def __init__(self, args, level=sys.maxsize):
        self.args = args
        self.max_level = level
        self.indenter = utils.Indenter()

    def build_node_tags(self, node):
        pass

    def build_node(self, node):
        if node['level'] > self.max_level:
            return

        report_line = node['line_number']
        fname = node['filename']

        self.indenter.newline()
        self.indenter.add('%s:%d: %s' % (fname,
                                         report_line,
                                         node['description']))
        self.indenter.newline()

        if not self.args.no_source:
            self.indenter.indent_push(node['level'])
            self.indenter.add(utils.build_source_context(fname, report_line))
            self.indenter.indent_pop()
            self.indenter.newline()

    def build_trace(self, trace):
        total_nodes = len(trace)
        hidden_nodes = len([None for n in trace if n['level'] > self.max_level])
        shown_nodes = total_nodes - hidden_nodes
        hidden_str = ''
        all_str = 'all '
        if hidden_nodes > 0:
            hidden_str = ' (%d steps too deeply nested)' % hidden_nodes
            all_str = ''
        self.indenter.add('Showing %s%d steps of the trace%s\n\n'
                          % (all_str, shown_nodes, hidden_str))
        self.indenter.newline()

        for node in trace:
            self.build_node(node)

    def build_report(self, report):
        traces = json.loads(report['trace'])
        self.build_trace(traces['trace'])

    def __str__(self):
        return str(self.indenter)


class Selector(object):
    def __init__(self, args, reports):
        self.args = args

        def has_trace(report):
            trace = json.loads(report['trace'])
            return len(trace['trace']) > 0
        self.reports = [report for report in reports if has_trace(report)]

    def show_choices(self):
        n = 0
        n_length = len(str(len(self)))
        for report in self.reports:
            print(str(n).rjust(n_length) + '. ' +
                  describe_report(report, n_length + 2))
            n += 1

    def prompt_report(self):
        report_number = 0
        if self.args.select is not None:
            report_number = self.parse_report_number(self.args.select[0], True)
        else:
            self.show_choices()

            if len(self) > 1:
                report_number_str = raw_input(
                    'Choose report to display (default=0): ')
                if report_number_str != '':
                    report_number = self.parse_report_number(report_number_str)
            elif len(self) == 1:
                print('Auto-selecting the only report.')

        return self.reports[report_number]

    def prompt_level(self):
        if self.args.max_level is not None:
            return self.parse_max_level(self.args.max_level[0], True)

        max_level_str = raw_input(
            'Choose maximum level of nested procedure calls (default=max): ')
        if max_level_str == '':
            max_level = sys.maxsize
        else:
            max_level = self.parse_max_level(max_level_str)

        print('')

        return max_level

    def parse_report_number(self, s, show_help=False):
        try:
            n = int(s)
        except ValueError:
            show_error_and_exit(
                'ERROR: integer report number expected',
                show_help)

        if n >= len(self) or n < 0:
            show_error_and_exit('ERROR: invalid report number.', show_help)

        return n

    def parse_max_level(self, s, show_help=False):
        if s == 'max':
            return sys.maxsize

        try:
            n = int(s)
        except ValueError:
            show_error_and_exit(
                'ERROR: integer max level or "max" expected',
                show_help)

        if n < 0:
            show_error_and_exit('ERROR: invalid max level.', show_help)

        return n

    def __len__(self):
        return len(self.reports)

    def __iter__(self):
        return self.reports.__iter__()

    def __next__(self):
        return self.reports.__next__()


def path_of_bug_number(traces_dir, i):
    return os.path.join(traces_dir, 'bug_%d.txt' % (i+1))


def url_of_bug_number(i):
    return '%s/bug_%d.txt' % (TRACES_REPORT_DIR, i+1)
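# Note that the trace files on disk and in the HTML are numbered starting
# from 1 (bug_1.txt, bug_2.txt, ...), while the selector indices used
# elsewhere in this script are 0-based; hence the i+1 above.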


def get_remote_source_template():
    """Return a URL template that, given 'file-name' and 'line-number'
    entries, points to the corresponding source location in the remote
    repository. Return None if no remote source has been detected.
    Currently only detects GitHub projects.
    """
    # see if we are in a GitHub project clone
    try:
        git_remote = subprocess.check_output(
            ['git',
             'config',
             '--get',
             'remote.origin.url']).decode().strip()
        m = SOURCE_REMOTE_GITHUB_RE.match(git_remote)
        if m is not None:
            project = m.group('project')
            # some remotes end in .git, but the http urls don't have
            # these
            if project.endswith('.git'):
                project = project[:-len('.git')]
            print('Detected GitHub project %s' % project)
            hash = subprocess.check_output(
                ['git',
                 'rev-parse',
                 'HEAD']).decode().strip()
            root = subprocess.check_output(
                ['git',
                 'rev-parse',
                 '--show-toplevel']).decode().strip()
            # FIXME(t8921813): we should have a way to get absolute
            # paths in traces. In the meantime, trust that we run from
            # the same directory from which infer was run.
            relative_path = os.path.relpath(os.getcwd(), root)
            d = {
                'project': project,
                'hash': hash,
                'relative-path': relative_path,
                'file-name': '{file-name}',
                'line-number': '{line-number}',
            }
            return SOURCE_REMOTE_GITHUB_URL_TEMPLATE.format(**d)
    except subprocess.CalledProcessError:
        pass

    return None
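# Example (hypothetical remote): in a clone of git@github.com:facebook/example.git
# checked out at commit abc123 and analyzed from its src/ subdirectory, the
# returned template would be
# https://github.com/facebook/example/blob/abc123/src/{file-name}#L{line-number}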


def html_bug_trace(args, report, bug_id):
    bug_trace = ''
    bug_trace += '%s\n' % describe_report(report)
    tracer = Tracer(args)
    tracer.build_report(report)
    bug_trace += str(tracer)
    return bug_trace


def html_list_of_bugs(args, remote_source_template, selector):
    template = '\n'.join([
        '<html>',
        '<head>',
        '<title>Infer found {num-bugs} bugs</title>',
        '</head>',
        '<body>',
        '<h2>List of bugs found</h2>',
        '{list-of-bugs}',
        '</body>',
        '</html>',
    ])

    report_template = '\n'.join([
        '<li>',
        '{description}',
        '({source-uri}<a href="{trace-url}">trace</a>)',
        '</li>',
    ])

    def source_uri(report):
        d = {
            'file-name': report['file'],
            'line-number': report['line'],
        }
        if remote_source_template is not None:
            link = remote_source_template.format(**d)
            return '<a href="%s">source</a> | ' % link
        return ''

    i = 0
    list_of_bugs = '<ol>'
    for report in selector:
        d = {
            'description': describe_report(report, 2),
            'trace-url': url_of_bug_number(i),
            'source-uri': source_uri(report),
        }
        list_of_bugs += report_template.format(**d)
        i += 1
    list_of_bugs += '</ol>'

    d = {
        'num-bugs': len(selector),
        'list-of-bugs': list_of_bugs,
    }
    return template.format(**d)


def generate_html_report(args, reports):
    html_dir = os.path.join(args.infer_out, HTML_REPORT_DIR)
    shutil.rmtree(html_dir, True)
    infer.mkdir_if_not_exists(html_dir)

    traces_dir = os.path.join(html_dir, TRACES_REPORT_DIR)
    infer.mkdir_if_not_exists(traces_dir)

    sel = Selector(args, reports)

    i = 0
    for bug in sel:
        bug_trace_path = path_of_bug_number(traces_dir, i)
        with open(bug_trace_path, 'w') as bug_trace_file:
            bug_trace_file.write(html_bug_trace(args, bug, i))
        i += 1

    remote_source_template = get_remote_source_template()
    bug_list_path = os.path.join(html_dir, 'index.html')
    with open(bug_list_path, 'w') as bug_list_file:
        bug_list_file.write(html_list_of_bugs(args,
                                              remote_source_template,
                                              sel))

    print('Saved html report in:\n%s' % bug_list_path)
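# The generated report therefore lives under <results directory>/report.html/:
# an index.html listing every bug, plus one traces/bug_<n>.txt file per report
# linked from that index.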


def main():
    args = base_parser.parse_args()

    report_filename = os.path.join(args.infer_out, utils.JSON_REPORT_FILENAME)
    with open(report_filename) as report_file:
        reports = json.load(report_file)

    if args.html:
        generate_html_report(args, reports)
        exit(0)

    sel = Selector(args, reports)

    if len(sel) == 0:
        print('No issues found')
        exit(0)

    if args.only_show:
        sel.show_choices()
        exit(0)

    report = sel.prompt_report()
    max_level = sel.prompt_level()

    print(describe_report(report))

    tracer = Tracer(args, max_level)
    tracer.build_report(report)
    print(tracer)


if __name__ == '__main__':
    main()