diff --git a/infer/lib/python/inferTraceBugs b/infer/lib/python/inferTraceBugs
index 6a95e0b65..58c651ccb 100755
--- a/infer/lib/python/inferTraceBugs
+++ b/infer/lib/python/inferTraceBugs
@@ -21,7 +21,7 @@ import shutil
 import subprocess
 import sys
 
-from inferlib import config, source, utils
+from inferlib import config, issues, source, utils
 
 HTML_REPORT_DIR = 'report.html'
 TRACES_REPORT_DIR = 'traces'
@@ -60,11 +60,11 @@ base_parser.add_argument('--html',
 
 
 def describe_report(report, indent=0):
-    filename = report[utils.JSON_INDEX_FILENAME]
-    kind = report[utils.JSON_INDEX_KIND]
-    line = report[utils.JSON_INDEX_LINE]
-    error_type = report[utils.JSON_INDEX_TYPE]
-    msg = report[utils.JSON_INDEX_QUALIFIER]
+    filename = report[issues.JSON_INDEX_FILENAME]
+    kind = report[issues.JSON_INDEX_KIND]
+    line = report[issues.JSON_INDEX_LINE]
+    error_type = report[issues.JSON_INDEX_TYPE]
+    msg = report[issues.JSON_INDEX_QUALIFIER]
     return '{0}:{1}: {2}: {3}\n {4}{5}\n'.format(
         filename,
         line,
@@ -93,22 +93,22 @@ class Tracer(object):
             pass
 
     def build_node(self, node):
-        if node[utils.JSON_INDEX_TRACE_LEVEL] > self.max_level:
+        if node[issues.JSON_INDEX_TRACE_LEVEL] > self.max_level:
             return
 
-        report_line = node[utils.JSON_INDEX_TRACE_LINE]
-        fname = node[utils.JSON_INDEX_TRACE_FILENAME]
+        report_line = node[issues.JSON_INDEX_TRACE_LINE]
+        fname = node[issues.JSON_INDEX_TRACE_FILENAME]
 
         self.indenter.newline()
         self.indenter.add('%s:%d: %s' % (
             fname,
             report_line,
-            node[utils.JSON_INDEX_TRACE_DESCRIPTION],
+            node[issues.JSON_INDEX_TRACE_DESCRIPTION],
         ))
         self.indenter.newline()
 
         if not self.args.no_source:
-            self.indenter.indent_push(node[utils.JSON_INDEX_TRACE_LEVEL])
+            self.indenter.indent_push(node[issues.JSON_INDEX_TRACE_LEVEL])
             mode = source.TERMINAL_FORMATTER
             if self.args.html:
                 mode = source.PLAIN_FORMATTER
@@ -121,7 +121,7 @@ class Tracer(object):
     def build_trace(self, trace):
         total_nodes = len(trace)
         hidden_nodes = len(
-            filter(lambda n: n[utils.JSON_INDEX_TRACE_LEVEL] > self.max_level,
+            filter(lambda n: n[issues.JSON_INDEX_TRACE_LEVEL] > self.max_level,
                    trace))
         shown_nodes = total_nodes - hidden_nodes
         hidden_str = ''
@@ -137,7 +137,7 @@ class Tracer(object):
             self.build_node(node)
 
     def build_report(self, report):
-        self.build_trace(report[utils.JSON_INDEX_TRACE])
+        self.build_trace(report[issues.JSON_INDEX_TRACE])
 
     def __str__(self):
         return str(self.indenter)
@@ -148,7 +148,7 @@ class Selector(object):
        self.args = args
 
        def has_trace(report):
-            return len(report[utils.JSON_INDEX_TRACE]) > 0
+            return len(report[issues.JSON_INDEX_TRACE]) > 0
        self.reports = [report for report in reports if has_trace(report)]
 
     def show_choices(self):
@@ -316,8 +316,8 @@ def html_list_of_bugs(args, remote_source_template, selector):
 
     def source_uri(report):
         d = {
-            'file-name': report[utils.JSON_INDEX_FILENAME],
-            'line-number': report[utils.JSON_INDEX_LINE],
+            'file-name': report[issues.JSON_INDEX_FILENAME],
+            'line-number': report[issues.JSON_INDEX_LINE],
         }
         if remote_source_template is not None:
             link = remote_source_template.format(**d)
diff --git a/infer/lib/python/inferlib/analyze.py b/infer/lib/python/inferlib/analyze.py
index d521167ba..366d99b9a 100644
--- a/infer/lib/python/inferlib/analyze.py
+++ b/infer/lib/python/inferlib/analyze.py
@@ -454,7 +454,7 @@ class Infer:
             reader = utils.locale_csv_reader(file_in)
             rows = [row for row in reader][1:]
             for row in rows:
-                key = row[utils.CSV_INDEX_TYPE]
+                key = row[issues.CSV_INDEX_TYPE]
                 previous_value = self.stats['int'].get(key, 0)
                 self.stats['int'][key] = previous_value + 1
 
diff --git a/infer/lib/python/inferlib/issues.py b/infer/lib/python/inferlib/issues.py
index 2746c61cc..679528083 100644
--- a/infer/lib/python/inferlib/issues.py
+++ b/infer/lib/python/inferlib/issues.py
@@ -53,6 +53,46 @@ NULL_STYLE_ISSUE_TYPES = [
     'PREMATURE_NIL_TERMINATION_ARGUMENT',
 ]
 
+# indices in rows of csv reports
+CSV_INDEX_CLASS = 0
+CSV_INDEX_KIND = 1
+CSV_INDEX_TYPE = 2
+CSV_INDEX_QUALIFIER = 3
+CSV_INDEX_SEVERITY = 4
+CSV_INDEX_LINE = 5
+CSV_INDEX_PROCEDURE = 6
+CSV_INDEX_PROCEDURE_ID = 7
+CSV_INDEX_FILENAME = 8
+CSV_INDEX_TRACE = 9
+CSV_INDEX_KEY = 10
+CSV_INDEX_QUALIFIER_TAGS = 11
+CSV_INDEX_HASH = 12
+CSV_INDEX_BUG_ID = 13
+CSV_INDEX_ALWAYS_REPORT = 14
+CSV_INDEX_ADVICE = 15
+
+# field names in rows of json reports
+JSON_INDEX_FILENAME = 'file'
+JSON_INDEX_HASH = 'hash'
+JSON_INDEX_KIND = 'kind'
+JSON_INDEX_LINE = 'line'
+JSON_INDEX_PROCEDURE = 'procedure'
+JSON_INDEX_QUALIFIER = 'qualifier'
+JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
+JSON_INDEX_SEVERITY = 'file'
+JSON_INDEX_TYPE = 'bug_type'
+JSON_INDEX_TRACE = 'bug_trace'
+JSON_INDEX_TRACE_LEVEL = 'level'
+JSON_INDEX_TRACE_FILENAME = 'filename'
+JSON_INDEX_TRACE_LINE = 'line_number'
+JSON_INDEX_TRACE_DESCRIPTION = 'description'
+JSON_INDEX_TRACE_NODE_TAGS = 'node_tags'
+JSON_INDEX_TRACE_NODE_TAGS_TAG = 'tags'
+JSON_INDEX_TRACE_NODE_TAGS_VALUE = 'value'
+
+QUALIFIER_TAGS = 'qualifier_tags'
+BUCKET_TAGS = 'bucket'
+
 
 def clean_csv(args, csv_report):
     collected_rows = []
@@ -63,7 +103,7 @@ def clean_csv(args, csv_report):
             return rows
         else:
             for row in rows[1:]:
-                filename = row[utils.CSV_INDEX_FILENAME]
+                filename = row[CSV_INDEX_FILENAME]
                 if os.path.isfile(filename):
                     if args.no_filtering \
                        or _should_report_csv(args.analyzer, row):
@@ -85,7 +125,7 @@ def clean_json(args, json_report):
     with open(json_report, 'r') as file_in:
         rows = json.load(file_in)
         for row in rows:
-            filename = row[utils.JSON_INDEX_FILENAME]
+            filename = row[JSON_INDEX_FILENAME]
             if os.path.isfile(filename):
                 if args.no_filtering \
                    or _should_report_json(args.analyzer, row):
@@ -104,19 +144,19 @@ def print_errors(json_report, bugs_out):
     with codecs.open(json_report, 'r', encoding=config.LOCALE) as file_in:
         errors = json.load(file_in)
 
-    errors = filter(lambda row: row[utils.JSON_INDEX_KIND] in
+    errors = filter(lambda row: row[JSON_INDEX_KIND] in
                     [ISSUE_KIND_ERROR, ISSUE_KIND_WARNING],
                     errors)
 
     with codecs.open(bugs_out, 'w', encoding=config.LOCALE) as file_out:
         text_errors_list = []
         for row in errors:
-            filename = row[utils.JSON_INDEX_FILENAME]
+            filename = row[JSON_INDEX_FILENAME]
             if os.path.isfile(filename):
-                kind = row[utils.JSON_INDEX_KIND]
-                line = row[utils.JSON_INDEX_LINE]
-                error_type = row[utils.JSON_INDEX_TYPE]
-                msg = row[utils.JSON_INDEX_QUALIFIER]
+                kind = row[JSON_INDEX_KIND]
+                line = row[JSON_INDEX_LINE]
+                error_type = row[JSON_INDEX_TYPE]
+                msg = row[JSON_INDEX_QUALIFIER]
                 indenter = source.Indenter()
                 indenter.indent_push()
                 indenter.add(
@@ -154,18 +194,18 @@ def _compare_issues(filename_1, line_1, filename_2, line_2):
 
 
 def _compare_csv_rows(row_1, row_2):
-    filename_1 = row_1[utils.CSV_INDEX_FILENAME]
-    filename_2 = row_2[utils.CSV_INDEX_FILENAME]
-    line_1 = int(row_1[utils.CSV_INDEX_LINE])
-    line_2 = int(row_2[utils.CSV_INDEX_LINE])
+    filename_1 = row_1[CSV_INDEX_FILENAME]
+    filename_2 = row_2[CSV_INDEX_FILENAME]
+    line_1 = int(row_1[CSV_INDEX_LINE])
+    line_2 = int(row_2[CSV_INDEX_LINE])
     return _compare_issues(filename_1, line_1, filename_2, line_2)
 
 
 def _compare_json_rows(row_1, row_2):
-    filename_1 = row_1[utils.JSON_INDEX_FILENAME]
-    filename_2 = row_2[utils.JSON_INDEX_FILENAME]
-    line_1 = row_1[utils.JSON_INDEX_LINE]
-    line_2 = row_2[utils.JSON_INDEX_LINE]
+    filename_1 = row_1[JSON_INDEX_FILENAME]
+    filename_2 = row_2[JSON_INDEX_FILENAME]
+    line_1 = row_1[JSON_INDEX_LINE]
+    line_2 = row_2[JSON_INDEX_LINE]
     return _compare_issues(filename_1, line_1, filename_2, line_2)
 
@@ -194,14 +234,14 @@ def _should_report(analyzer, error_kind, error_type, error_bucket):
 
 
 def _should_report_csv(analyzer, row):
-    error_kind = row[utils.CSV_INDEX_KIND]
-    error_type = row[utils.CSV_INDEX_TYPE]
+    error_kind = row[CSV_INDEX_KIND]
+    error_type = row[CSV_INDEX_TYPE]
     error_bucket = ''  # can be updated later once we extract it from qualifier
 
     try:
-        qualifier_xml = ET.fromstring(row[utils.CSV_INDEX_QUALIFIER_TAGS])
-        if qualifier_xml.tag == utils.QUALIFIER_TAGS:
-            bucket = qualifier_xml.find(utils.BUCKET_TAGS)
+        qualifier_xml = ET.fromstring(row[CSV_INDEX_QUALIFIER_TAGS])
+        if qualifier_xml.tag == QUALIFIER_TAGS:
+            bucket = qualifier_xml.find(BUCKET_TAGS)
             if bucket is not None:
                 error_bucket = bucket.text
     except ET.ParseError:
@@ -211,12 +251,12 @@ def _should_report_json(analyzer, row):
-    error_kind = row[utils.JSON_INDEX_KIND]
-    error_type = row[utils.JSON_INDEX_TYPE]
+    error_kind = row[JSON_INDEX_KIND]
+    error_type = row[JSON_INDEX_TYPE]
     error_bucket = ''  # can be updated later once we extract it from qualifier
 
-    for qual_tag in row[utils.QUALIFIER_TAGS]:
-        if qual_tag['tag'] == utils.BUCKET_TAGS:
+    for qual_tag in row[QUALIFIER_TAGS]:
+        if qual_tag['tag'] == BUCKET_TAGS:
             error_bucket = qual_tag['value']
             break
 
diff --git a/infer/lib/python/inferlib/utils.py b/infer/lib/python/inferlib/utils.py
index d84a656fc..3bbd5de16 100644
--- a/infer/lib/python/inferlib/utils.py
+++ b/infer/lib/python/inferlib/utils.py
@@ -18,7 +18,6 @@ import gzip
 import json
 import logging
 import os
-import re
 import subprocess
 import sys
 import tempfile
@@ -27,46 +26,6 @@ import time
 
 from . import config
 
-# indices in rows of csv reports
-CSV_INDEX_CLASS = 0
-CSV_INDEX_KIND = 1
-CSV_INDEX_TYPE = 2
-CSV_INDEX_QUALIFIER = 3
-CSV_INDEX_SEVERITY = 4
-CSV_INDEX_LINE = 5
-CSV_INDEX_PROCEDURE = 6
-CSV_INDEX_PROCEDURE_ID = 7
-CSV_INDEX_FILENAME = 8
-CSV_INDEX_TRACE = 9
-CSV_INDEX_KEY = 10
-CSV_INDEX_QUALIFIER_TAGS = 11
-CSV_INDEX_HASH = 12
-CSV_INDEX_BUG_ID = 13
-CSV_INDEX_ALWAYS_REPORT = 14
-CSV_INDEX_ADVICE = 15
-
-# field names in rows of json reports
-JSON_INDEX_FILENAME = 'file'
-JSON_INDEX_HASH = 'hash'
-JSON_INDEX_KIND = 'kind'
-JSON_INDEX_LINE = 'line'
-JSON_INDEX_PROCEDURE = 'procedure'
-JSON_INDEX_QUALIFIER = 'qualifier'
-JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
-JSON_INDEX_SEVERITY = 'file'
-JSON_INDEX_TYPE = 'bug_type'
-JSON_INDEX_TRACE = 'bug_trace'
-JSON_INDEX_TRACE_LEVEL = 'level'
-JSON_INDEX_TRACE_FILENAME = 'filename'
-JSON_INDEX_TRACE_LINE = 'line_number'
-JSON_INDEX_TRACE_DESCRIPTION = 'description'
-JSON_INDEX_TRACE_NODE_TAGS = 'node_tags'
-JSON_INDEX_TRACE_NODE_TAGS_TAG = 'tags'
-JSON_INDEX_TRACE_NODE_TAGS_VALUE = 'value'
-
-QUALIFIER_TAGS = 'qualifier_tags'
-BUCKET_TAGS = 'bucket'
-
 FORMAT = '[%(levelname)s] %(message)s'
 DEBUG_FORMAT = '[%(levelname)s:%(filename)s:%(lineno)03d] %(message)s'
 
diff --git a/scripts/build_integration_tests.py b/scripts/build_integration_tests.py
index ab8716c4c..c9bdbc93e 100755
--- a/scripts/build_integration_tests.py
+++ b/scripts/build_integration_tests.py
@@ -19,7 +19,7 @@
 sys.path.insert(0,
                 os.path.join(SCRIPTS_DIRECTORY,
                              os.pardir, 'infer', 'lib', 'python'))
 
-from inferlib import config, utils
+from inferlib import issues
 
 CURRENT_DIR = os.getcwd()
@@ -30,9 +30,9 @@
 INFER_EXECUTABLE = 'infer'
 
 RECORD_ENV = 'INFER_RECORD_INTEGRATION_TESTS'
 
 REPORT_FIELDS = [
-    utils.JSON_INDEX_FILENAME,
-    utils.JSON_INDEX_PROCEDURE,
-    utils.JSON_INDEX_TYPE,
+    issues.JSON_INDEX_FILENAME,
+    issues.JSON_INDEX_PROCEDURE,
+    issues.JSON_INDEX_TYPE,
 ]
@@ -45,12 +45,12 @@ def quote(s):
 
 
 def string_of_error(e):
-    if utils.JSON_INDEX_LINE in e:
-        line = ' on line %s ' % e[utils.JSON_INDEX_LINE]
+    if issues.JSON_INDEX_LINE in e:
+        line = ' on line %s ' % e[issues.JSON_INDEX_LINE]
     msg = '%s in file %s, procedure %s%s' % (
-        e[utils.JSON_INDEX_TYPE],
-        quote(e[utils.JSON_INDEX_FILENAME]),
-        quote(e[utils.JSON_INDEX_PROCEDURE]),
+        e[issues.JSON_INDEX_TYPE],
+        quote(e[issues.JSON_INDEX_FILENAME]),
+        quote(e[issues.JSON_INDEX_PROCEDURE]),
         line,
     )
     return msg
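For orientation, a minimal usage sketch (not part of the diff above): it shows how a downstream script could read an Infer report.json using the constants that this change relocates into inferlib.issues. Only the JSON_INDEX_* names are taken from the diff; the summarize helper, the report path argument, and the UTF-8 encoding are assumptions for illustration.

# Hypothetical sketch; assumes inferlib is on sys.path and that
# report_json points at an existing Infer report.json file.
import codecs
import json

from inferlib import issues


def summarize(report_json):
    with codecs.open(report_json, 'r', encoding='utf-8') as file_in:
        rows = json.load(file_in)
    for row in rows:
        # JSON_INDEX_* are the field names moved from utils.py to issues.py
        print('{0}:{1}: {2}: {3}'.format(
            row[issues.JSON_INDEX_FILENAME],
            row[issues.JSON_INDEX_LINE],
            row[issues.JSON_INDEX_TYPE],
            row[issues.JSON_INDEX_QUALIFIER]))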