options to run selective tests in build_integration_tests

Summary: public
See the file for some example usage.

Reviewed By: sblackshear, jeremydubreil

Differential Revision: D3058440

fb-gh-sync-id: c891dfe
shipit-source-id: c891dfe
Jules Villard authored 9 years ago, committed by Facebook Github Bot 8
parent 63b1df9966
commit 588cdcde42

@@ -6,6 +6,16 @@
 # LICENSE file in the root directory of this source tree. An additional grant
 # of patent rights can be found in the PATENTS file in the same directory.
 
+# example usage:
+# # run all the tests
+# ./build_integration_test.py
+# # run only the ant and gradle tests
+# ./build_integration_test.py -- ant gradle
+# # run no test
+# ./build_integration_test.py --
+# # run only the buck tests and record the output
+# INFER_RECORD_INTEGRATION_TESTS=1 ./build_integration_test.py -- buck
+
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -43,6 +53,11 @@ REPORT_FIELDS = [
 EXPECTED_OUTPUTS_DIR = os.path.join(SCRIPT_DIR, 'expected_outputs')
 
+ALL_TESTS = ['ant', 'buck', 'gradle']
+
+to_test = ALL_TESTS
+
+
 def should_record_tests():
     return RECORD_ENV in os.environ and os.environ[RECORD_ENV] == '1'
@@ -168,8 +183,11 @@ def do_test(errors, expected_errors_filename):
 class BuildIntegrationTest(unittest.TestCase):
 
     def test_ant_integration(self):
-        if is_tool_available(['ant', '-version']):
-            print('\nRunning Gradle integration test')
-            root = os.path.join(SCRIPT_DIR, os.pardir)
-            errors = run_analysis(
-                root,
+        if not ('ant' in to_test and is_tool_available(['ant', '-version'])):
+            print('\nSkipping Ant integration test')
+            return
+        print('\nRunning Ant integration test')
+        root = os.path.join(SCRIPT_DIR, os.pardir)
+        errors = run_analysis(
+            root,
@@ -178,11 +196,13 @@ class BuildIntegrationTest(unittest.TestCase):
-                INFER_EXECUTABLE)
-            original = os.path.join(EXPECTED_OUTPUTS_DIR, 'ant_report.json')
-            do_test(errors, original)
-        else:
-            print('\nSkipping Ant integration test')
-            assert True
+            INFER_EXECUTABLE)
+        original = os.path.join(EXPECTED_OUTPUTS_DIR, 'ant_report.json')
+        do_test(errors, original)
 
     def test_gradle_integration(self):
+        if 'gradle' not in to_test:
+            print('\nSkipping Gradle integration test')
+            return
         print('\nRunning Gradle integration test using mock gradle')
         root = os.path.join(ROOT_DIR, 'examples', 'java_hello')
         env = os.environ
@@ -200,7 +220,11 @@ class BuildIntegrationTest(unittest.TestCase):
         do_test(errors, original)
 
     def test_buck_integration(self):
-        if is_tool_available(['buck', '--version']):
-            print('\nRunning Buck integration test')
-            errors = run_analysis(
-                ROOT_DIR,
+        if not ('buck' in to_test and
+                is_tool_available(['buck', '--version'])):
+            print('\nSkipping Buck integration test')
+            return
+        print('\nRunning Buck integration test')
+        errors = run_analysis(
+            ROOT_DIR,
@@ -209,10 +233,15 @@ class BuildIntegrationTest(unittest.TestCase):
-                INFER_EXECUTABLE)
-            original = os.path.join(EXPECTED_OUTPUTS_DIR, 'buck_report.json')
-            do_test(errors, original)
-        else:
-            print('\nSkipping Buck integration test')
-            assert True
+            INFER_EXECUTABLE)
+        original = os.path.join(EXPECTED_OUTPUTS_DIR, 'buck_report.json')
+        do_test(errors, original)
 
 
 if __name__ == '__main__':
+    # hackish capturing of the arguments after '--'
+    try:
+        i = sys.argv.index('--')
+        to_test = sys.argv[i + 1:]
+        sys.argv = sys.argv[:i]
+    except ValueError:
+        pass
     unittest.main()  # run all the tests
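
A note on the argv handling in the last hunk: the '--' capture must run before
unittest.main(), because unittest treats leftover positional arguments as test
names and would fail on 'ant gradle'. A minimal, self-contained sketch of the
same pattern (the test class and method here are hypothetical, not the file's
actual tests):

    import sys
    import unittest

    to_test = ['ant', 'buck', 'gradle']  # default: run everything


    class SelectiveTest(unittest.TestCase):

        def test_ant(self):
            if 'ant' not in to_test:
                print('\nSkipping ant test')
                return
            self.assertTrue(True)  # real test body would go here


    if __name__ == '__main__':
        # Split argv at '--': names after it select the tests to run,
        # everything before it is left for unittest.main() to parse.
        try:
            i = sys.argv.index('--')
            to_test = sys.argv[i + 1:]
            sys.argv = sys.argv[:i]
        except ValueError:
            pass  # no '--' given: keep the default list
        unittest.main()

An alternative to print-and-return would be unittest's own self.skipTest(...),
which reports the test as skipped rather than passed; the patch opts for plain
prints instead.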

Loading…
Cancel
Save
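The gating also relies on is_tool_available, which this diff calls but does not
define. A plausible sketch of such a helper, assuming it simply tries to spawn
the tool (hypothetical; the real helper in build_integration_test.py may differ):

    import os
    import subprocess


    def is_tool_available(command):
        # Hypothetical sketch: try to run e.g. ['ant', '-version'] and treat
        # a missing binary (OSError) or a non-zero exit as "not available".
        try:
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(command, stdout=devnull, stderr=devnull)
        except (OSError, subprocess.CalledProcessError):
            return False
        return True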