mirror of https://github.com/saltstack/salt.git
Adapted the tests suite to work with salt-testing
This commit is contained in:
parent e07375db4e
commit d4fd1baa2f
3 changed files with 318 additions and 651 deletions
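The commit swaps the suite's hand-rolled optparse handling for the parsers shipped with the external salt-testing package. As orientation before the diff itself, a minimal sketch of the subclass-and-hook pattern the commit introduces, assuming salttesting is importable; the class name MyParser and the tests path are illustrative, not part of the commit:

    # Sketch only; mirrors the pattern used in the diff below.
    from salttesting.parser import SaltTestingParser


    class MyParser(SaltTestingParser):
        def setup_additional_options(self):
            # Hook for registering options beyond the stock salt-testing set.
            self.add_option(
                '--sysinfo',
                default=False,
                action='store_true',
                help='Print some system information.'
            )


    parser = MyParser('/path/to/tests')  # illustrative path
    parser.parse_args()
    # ... run the suites, then exit through the parser so reports get printed:
    parser.finalize(0)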
@@ -3,7 +3,6 @@ Set up the Salt integration test suite
'''

# Import Python libs
import optparse
import multiprocessing
import os
import sys

@@ -31,15 +30,10 @@ import salt.runner
import salt.output
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
from saltunittest import TestCase, RedirectStdStreams

try:
    import console
    width, height = console.getTerminalSize()
    PNUM = width
except:
    PNUM = 70

from salttesting import TestCase
from salttesting.parser import PNUM, print_header
from salttesting.helpers import RedirectStdStreams

INTEGRATION_TEST_DIR = os.path.dirname(
    os.path.normpath(os.path.abspath(__file__))

@@ -61,69 +55,53 @@ TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
log = logging.getLogger(__name__)


def print_header(header, sep='~', top=True, bottom=True, inline=False,
                 centered=False):
    '''
    Allows some pretty printing of headers on the console, either with a
    "ruler" on bottom and/or top, inline, centered, etc.
    '''
    if top and not inline:
        print(sep * PNUM)

    if centered and not inline:
        fmt = u'{0:^{width}}'
    elif inline and not centered:
        fmt = u'{0:{sep}<{width}}'
    elif inline and centered:
        fmt = u'{0:{sep}^{width}}'
    else:
        fmt = u'{0}'
    print(fmt.format(header, sep=sep, width=PNUM))

    if bottom and not inline:
        print(sep * PNUM)


def run_tests(TestCase):
    '''
    Run integration tests for a chosen test case.

    Function uses optparse to set up test environment
    '''
    from saltunittest import TestLoader, TextTestRunner
    opts = parse_opts()
    loader = TestLoader()
    tests = loader.loadTestsFromTestCase(TestCase)
    print('Setting up Salt daemons to execute tests')
    with TestDaemon(clean=opts.clean):
        runner = TextTestRunner(verbosity=opts.verbosity).run(tests)
        sys.exit(runner.wasSuccessful())
    from salttesting import TestLoader, TextTestRunner
    from salttesting.parser import SaltTestingParser

    class TestcaseParser(SaltTestingParser):
        def setup_additional_options(self):
            self.option_groups.remove(self.test_selection_group)
            if self.has_option('--xml-out'):
                self.remove_option('--xml-out')
            if self.has_option('--html-out'):
                self.remove_option('--html-out')
            self.add_option(
                '--sysinfo',
                default=False,
                action='store_true',
                help='Print some system information.'
            )
            self.output_options_group.add_option(
                '--no-colors',
                '--no-colours',
                default=False,
                action='store_true',
                help='Disable colour printing.'
            )

def parse_opts():
    '''
    Parse command line options for running integration tests
    '''
    parser = optparse.OptionParser()
    parser.add_option('-v',
                      '--verbose',
                      dest='verbosity',
                      default=1,
                      action='count',
                      help='Verbose test runner output')
    parser.add_option('--clean',
                      dest='clean',
                      default=True,
                      action='store_true',
                      help=('Clean up test environment before and after '
                            'integration testing (default behaviour)'))
    parser.add_option('--no-clean',
                      dest='clean',
                      action='store_false',
                      help=('Don\'t clean up test environment before and after '
                            'integration testing (speed up test process)'))
    options, _ = parser.parse_args()
    return options
        def run_suite(self, testcase):
            loader = TestLoader()
            tests = loader.loadTestsFromTestCase(testcase)
            print('Setting up Salt daemons to execute tests')
            with TestDaemon(self):
                header = '{0} Tests'.format(testcase.__name__)
                print_header('Starting {0}'.format(header))
                runner = TextTestRunner(
                    verbosity=self.options.verbosity).run(tests)
                self.testsuite_results.append((header, runner))
            return runner.wasSuccessful()

    parser = TestcaseParser(INTEGRATION_TEST_DIR)
    parser.parse_args()
    if parser.run_suite(TestCase) is False:
        parser.finalize(1)
    parser.finalize(0)


class TestDaemon(object):

@@ -132,9 +110,9 @@ class TestDaemon(object):
    '''
    MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120

    def __init__(self, opts=None):
        self.opts = opts
        self.colors = get_colors(opts.no_colors is False)
    def __init__(self, parser):
        self.parser = parser
        self.colors = get_colors(self.parser.options.no_colors is False)

    def __enter__(self):
        '''

@@ -270,7 +248,7 @@ class TestDaemon(object):
        self.pre_setup_minions()
        self.setup_minions()

        if self.opts.sysinfo:
        if self.parser.options.sysinfo:
            from salt import version
            print_header('~~~~~~~ Versions Report ', inline=True)
            print('\n'.join(version.versions_report()))

@@ -279,7 +257,6 @@ class TestDaemon(object):
            '~~~~~~~ Minion Grains Information ', inline=True,
        )


        print_header('', sep='=', inline=True)

        try:

@@ -342,8 +319,8 @@ class TestDaemon(object):

        del wait_minion_connections

        sync_needed = self.opts.clean
        if self.opts.clean is False:
        sync_needed = self.parser.options.clean
        if self.parser.options.clean is False:
            def sumfile(fpath):
                # Since we will be do'in this for small files, it should be ok
                fobj = fopen(fpath)

@@ -413,7 +390,7 @@ class TestDaemon(object):
        '''
        Clean out the tmp files
        '''
        if not self.opts.clean:
        if not self.parser.options.clean:
            return
        if os.path.isdir(self.sub_minion_opts['root_dir']):
            shutil.rmtree(self.sub_minion_opts['root_dir'])

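With run_tests reworked as above, an individual test module drives the whole daemon setup from its __main__ guard. A hedged sketch of that calling convention; MyModuleTest is illustrative, while run_tests and the salttesting TestCase import come straight from the diff:

    from salttesting import TestCase


    class MyModuleTest(TestCase):
        def test_something(self):
            self.assertTrue(True)


    if __name__ == '__main__':
        from integration import run_tests
        run_tests(MyModuleTest)
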
@@ -7,19 +7,13 @@ Discover all instances of unittest.TestCase in this directory.
import sys
import os
import re
import logging
import optparse
import resource
import tempfile

# Import salt libs
import saltunittest
from integration import print_header, PNUM, TestDaemon, TMP

try:
    import xmlrunner
except ImportError:
    xmlrunner = None
from salttesting import *
from salttesting.parser import PNUM, print_header, SaltTestingParser
from integration import TestDaemon, TMP

TEST_DIR = os.path.dirname(os.path.normpath(os.path.abspath(__file__)))
SALT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
@@ -27,6 +21,10 @@ XML_OUTPUT_DIR = os.environ.get(
    'SALT_XML_TEST_REPORTS_DIR',
    os.path.join(TMP, 'xml-test-reports')
)
HTML_OUTPUT_DIR = os.environ.get(
    'SALT_HTML_TEST_REPORTS_DIR',
    os.path.join(TMP, 'html-test-reports')
)
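Both report directories honour environment overrides, so a CI wrapper can redirect them without patching the runner. A small sketch; the paths are illustrative:

    import os

    # Must be set before runtests.py evaluates the os.environ.get() calls above.
    os.environ['SALT_XML_TEST_REPORTS_DIR'] = '/var/ci/reports/xml'
    os.environ['SALT_HTML_TEST_REPORTS_DIR'] = '/var/ci/reports/html'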


try:
@@ -50,439 +48,257 @@ except ImportError:

REQUIRED_OPEN_FILES = 3072

TEST_RESULTS = []


class SaltTestsuiteParser(SaltTestingParser):

def run_suite(opts, path, display_name, suffix='[!_]*.py'):
    '''
    Execute a unit test suite
    '''
    loader = saltunittest.TestLoader()
    if opts.name:
        tests = loader.loadTestsFromName(display_name)
    else:
        tests = loader.discover(path, suffix, TEST_DIR)
    def setup_additional_options(self):
        self.add_option(
            '--sysinfo',
            default=False,
            action='store_true',
            help='Print some system information.'
        )

    header = '{0} Tests'.format(display_name)
    print_header('Starting {0}'.format(header))
        self.test_selection_group.add_option(
            '-m',
            '--module',
            '--module-tests',
            dest='module',
            default=False,
            action='store_true',
            help='Run tests for modules'
        )
        self.test_selection_group.add_option(
            '-S',
            '--state',
            '--state-tests',
            dest='state',
            default=False,
            action='store_true',
            help='Run tests for states'
        )
        self.test_selection_group.add_option(
            '-c',
            '--client',
            '--client-tests',
            dest='client',
            default=False,
            action='store_true',
            help='Run tests for client'
        )
        self.test_selection_group.add_option(
            '-s',
            '--shell',
            dest='shell',
            default=False,
            action='store_true',
            help='Run shell tests'
        )
        self.test_selection_group.add_option(
            '-r',
            '--runner',
            dest='runner',
            default=False,
            action='store_true',
            help='Run runner tests'
        )
        self.test_selection_group.add_option(
            '-u',
            '--unit',
            '--unit-tests',
            dest='unit',
            default=False,
            action='store_true',
            help='Run unit tests'
        )
        self.test_selection_group.add_option(
            '--run-destructive',
            action='store_true',
            default=False,
            help=('Run destructive tests. These tests can include adding or '
                  'removing users from your system for example. Default: '
                  '%default')
        )

    if opts.xmlout:
        runner = xmlrunner.XMLTestRunner(output=XML_OUTPUT_DIR).run(tests)
    else:
        if not os.path.isdir(XML_OUTPUT_DIR):
            os.makedirs(XML_OUTPUT_DIR)
        runner = saltunittest.TextTestRunner(
            verbosity=opts.verbosity
        ).run(tests)
    TEST_RESULTS.append((header, runner))
        self.output_options_group.add_option(
            '--coverage',
            default=False,
            action='store_true',
            help='Run tests and report code coverage'
        )
        self.output_options_group.add_option(
            '--no-coverage-report',
            default=False,
            action='store_true',
            help='Don\'t build the coverage HTML report'
        )
        self.output_options_group.add_option(
            '--no-colors',
            '--no-colours',
            default=False,
            action='store_true',
            help='Disable colour printing.'
        )

    return runner.wasSuccessful()


def run_integration_suite(opts, suite_folder, display_name):
    '''
    Run an integration test suite
    '''
    path = os.path.join(TEST_DIR, 'integration', suite_folder)
    return run_suite(opts, path, display_name)


def run_integration_tests(opts):
    '''
    Execute the integration tests suite
    '''
    if opts.unit and not (opts.runner or opts.state or opts.module or opts.client):
        return [True]
    smax_open_files, hmax_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)
    if smax_open_files < REQUIRED_OPEN_FILES:
        print('~' * PNUM)
        print('Max open files setting is too low({0}) for running the tests'.format(smax_open_files))
        print('Trying to raise the limit to {0}'.format(REQUIRED_OPEN_FILES))
        if hmax_open_files < 4096:
            hmax_open_files = 4096  # Decent default?
        try:
            resource.setrlimit(
                resource.RLIMIT_NOFILE,
                (REQUIRED_OPEN_FILES, hmax_open_files)
    def validate_options(self):
        if self.options.coverage and code_coverage is None:
            self.error(
                'Cannot run tests with coverage report. '
                'Please install coverage>=3.5.3'
            )
        except Exception as err:
            print('ERROR: Failed to raise the max open files setting -> {0}'.format(err))
            print('Please issue the following command on your console:')
            print('  ulimit -n {0}'.format(REQUIRED_OPEN_FILES))
            sys.exit(1)
        finally:
            print('~' * PNUM)

    print_header('Setting up Salt daemons to execute tests', top=False)
    status = []
    if not any([opts.client, opts.module, opts.runner,
                opts.shell, opts.state, opts.name]):
        return status
    with TestDaemon(opts=opts):
        if opts.name:
            for name in opts.name:
                results = run_suite(opts, '', name)
                status.append(results)
        if opts.runner:
            status.append(run_integration_suite(opts, 'runners', 'Runner'))
        if opts.module:
            status.append(run_integration_suite(opts, 'modules', 'Module'))
        if opts.state:
            status.append(run_integration_suite(opts, 'states', 'State'))
        if opts.client:
            status.append(run_integration_suite(opts, 'client', 'Client'))
        if opts.shell:
            status.append(run_integration_suite(opts, 'shell', 'Shell'))
    return status


def run_unit_tests(opts):
    '''
    Execute the unit tests
    '''
    if not opts.unit:
        return [True]
    status = []
    if opts.name:
        for name in opts.name:
            results = run_suite(opts, os.path.join(TEST_DIR, 'unit'), name)
            status.append(results)
    else:
        results = run_suite(
            opts, os.path.join(TEST_DIR, 'unit'), 'Unit', '*_test.py')
        status.append(results)
    return status


def parse_opts():
    '''
    Parse command line options for running specific tests
    '''
    parser = optparse.OptionParser()

    parser.add_option(
        '--sysinfo',
        default=False,
        action='store_true',
        help='Print some system information.'
    )

    tests_select_group = optparse.OptionGroup(
        parser,
        "Tests Selection Options",
        "Select which tests are to be executed"
    )
    tests_select_group.add_option(
        '-m',
        '--module',
        '--module-tests',
        dest='module',
        default=False,
        action='store_true',
        help='Run tests for modules'
    )
    tests_select_group.add_option(
        '-S',
        '--state',
        '--state-tests',
        dest='state',
        default=False,
        action='store_true',
        help='Run tests for states'
    )
    tests_select_group.add_option(
        '-c',
        '--client',
        '--client-tests',
        dest='client',
        default=False,
        action='store_true',
        help='Run tests for client'
    )
    tests_select_group.add_option(
        '-s',
        '--shell',
        dest='shell',
        default=False,
        action='store_true',
        help='Run shell tests'
    )
    tests_select_group.add_option(
        '-r',
        '--runner',
        dest='runner',
        default=False,
        action='store_true',
        help='Run runner tests'
    )
    tests_select_group.add_option(
        '-u',
        '--unit',
        '--unit-tests',
        dest='unit',
        default=False,
        action='store_true',
        help='Run unit tests'
    )
    tests_select_group.add_option(
        '-n',
        '--name',
        dest='name',
        action='append',
        default=[],
        help=('Specific test name to run. A named test is the module path '
              'relative to the tests directory, to execute the config module '
              'integration tests for instance call:\n'
              'runtests.py -n integration.modules.config')
    )
    tests_select_group.add_option(
        '--run-destructive',
        action='store_true',
        default=False,
        help='Run destructive tests. These tests can include adding or '
             'removing users from your system for example. Default: %default'
    )
    parser.add_option_group(tests_select_group)

    fs_cleanup_options_group = optparse.OptionGroup(
        parser, "File system cleanup Options"
    )
    fs_cleanup_options_group.add_option(
        '--clean',
        dest='clean',
        default=True,
        action='store_true',
        help='Clean up test environment before and after integration '
             'testing (default behaviour)'
    )
    fs_cleanup_options_group.add_option(
        '--no-clean',
        dest='clean',
        action='store_false',
        help='Don\'t clean up test environment before and after integration '
             'testing (speed up test process)'
    )
    parser.add_option_group(fs_cleanup_options_group)

    output_options_group = optparse.OptionGroup(parser, "Output Options")
    output_options_group.add_option(
        '-v',
        '--verbose',
        dest='verbosity',
        default=1,
        action='count',
        help='Verbose test runner output'
    )
    output_options_group.add_option(
        '-x',
        '--xml',
        dest='xmlout',
        default=False,
        action='store_true',
        help='XML test runner output(Output directory: {0})'.format(
            XML_OUTPUT_DIR
        )
    )
    output_options_group.add_option(
        '--no-report',
        default=False,
        action='store_true',
        help='Do NOT show the overall tests result'
    )
    output_options_group.add_option(
        '--coverage',
        default=False,
        action='store_true',
        help='Run tests and report code coverage'
    )
    output_options_group.add_option(
        '--no-coverage-report',
        default=False,
        action='store_true',
        help='Don\'t build the coverage HTML report'
    )
    output_options_group.add_option(
        '--no-colors',
        '--no-colours',
        default=False,
        action='store_true',
        help='Disable colour printing.'
    )
    parser.add_option_group(output_options_group)

    options, _ = parser.parse_args()

    if options.xmlout and xmlrunner is None:
        parser.error(
            '\'--xml\' is not available. The xmlrunner library is not '
            'installed.'
        )

    if options.coverage and code_coverage is None:
        parser.error(
            'Cannot run tests with coverage report. '
            'Please install coverage>=3.5.3'
        )
    elif options.coverage:
        coverage_version = tuple(
            [int(part) for part in
             re.search(r'([0-9.]+)', coverage.__version__).group(0).split('.')]
        )
        if coverage_version < (3, 5, 3):
            # Should we just print the error instead of exiting?
            parser.error(
                'Versions lower than 3.5.3 of the coverage library are know '
                'to produce incorrect results. Please consider upgrading...'
            )

        if any((options.module, options.client, options.shell, options.unit,
                options.state, options.runner, options.name,
                os.geteuid() != 0, not options.run_destructive)):
            parser.error(
                'No sense in generating the tests coverage report when not '
                'running the full test suite, including the destructive '
                'tests, as \'root\'. It would only produce incorrect '
                'results.'
            )

        # Update environ so that any subprocess started on test are also
        # included in the report
        os.environ['COVERAGE_PROCESS_START'] = '1'

    # Setup logging
    formatter = logging.Formatter(
        '%(asctime)s,%(msecs)03.0f [%(name)-5s:%(lineno)-4d]'
        '[%(levelname)-8s] %(message)s',
        datefmt='%H:%M:%S'
    )
    logfile = os.path.join(tempfile.gettempdir(), 'salt-runtests.log')
    filehandler = logging.FileHandler(
        mode='w',   # Not preserved between re-runs
        filename=logfile
    )
    filehandler.setLevel(logging.DEBUG)
    filehandler.setFormatter(formatter)
    logging.root.addHandler(filehandler)
    logging.root.setLevel(logging.DEBUG)

    print_header('Logging tests on {0}'.format(logfile), bottom=False)
    print('Current Directory: {0}'.format(os.getcwd()))
    print_header(
        'Test suite is running under PID {0}'.format(os.getpid()), bottom=False
    )

    # With greater verbosity we can also log to the console
    if options.verbosity > 2:
        consolehandler = logging.StreamHandler(sys.stderr)
        consolehandler.setLevel(logging.INFO)       # -vv
        consolehandler.setFormatter(formatter)
        handled_levels = {
            3: logging.DEBUG,   # -vvv
            4: logging.TRACE,   # -vvvv
            5: logging.GARBAGE  # -vvvvv
        }
        if options.verbosity > 3:
            consolehandler.setLevel(
                handled_levels.get(
                    options.verbosity,
                    options.verbosity > 5 and 5 or 3
        elif self.options.coverage:
            coverage_version = tuple([
                int(part) for part in re.search(
                    r'([0-9.]+)', coverage.__version__).group(0).split('.')
            ])
            if coverage_version < (3, 5, 3):
                # Should we just print the error instead of exiting?
                self.error(
                    'Versions lower than 3.5.3 of the coverage library are '
                    'know to produce incorrect results. Please consider '
                    'upgrading...'
                )
                )
            # Update environ so that any subprocess started on test are also
            # included in the report
            os.environ['COVERAGE_PROCESS_START'] = '1'

        logging.root.addHandler(consolehandler)
            if any((self.options.module, self.options.client,
                    self.options.shell, self.options.unit, self.options.state,
                    self.options.runner, self.options.name, os.geteuid() != 0,
                    not self.options.run_destructive)):
                self.error(
                    'No sense in generating the tests coverage report when '
                    'not running the full test suite, including the '
                    'destructive tests, as \'root\'. It would only produce '
                    'incorrect results.'
                )

    os.environ['DESTRUCTIVE_TESTS'] = str(options.run_destructive)
        # Set the required environment variable in order to know if destructive
        # tests should be executed or not.
        os.environ['DESTRUCTIVE_TESTS'] = str(self.options.run_destructive)

    if not any((options.module, options.client,
                options.shell, options.unit,
                options.state, options.runner,
                options.name)):
        options.module = True
        options.client = True
        options.shell = True
        options.unit = True
        options.runner = True
        options.state = True
    return options
        # Set test suite defaults if no specific suite options are provided
        if not any((self.options.module, self.options.client,
                    self.options.shell, self.options.unit, self.options.state,
                    self.options.runner, self.options.name)):
            self.options.module = True
            self.options.client = True
            self.options.shell = True
            self.options.unit = True
            self.options.runner = True
            self.options.state = True

        if self.options.coverage:
            code_coverage.start()

if __name__ == '__main__':
    opts = parse_opts()
    if opts.coverage:
        code_coverage.start()
    def run_integration_suite(self, suite_folder, display_name):
        '''
        Run an integration test suite
        '''
        path = os.path.join(TEST_DIR, 'integration', suite_folder)
        return self.run_suite(path, display_name)

    overall_status = []
    status = run_integration_tests(opts)
    overall_status.extend(status)
    status = run_unit_tests(opts)
    overall_status.extend(status)
    false_count = overall_status.count(False)
    def run_integration_tests(self):
        '''
        Execute the integration tests suite
        '''
        if self.options.unit and not (self.options.runner or
                                      self.options.state or
                                      self.options.module or
                                      self.options.client):
            return [True]

    if opts.no_report:
        if opts.coverage:
            code_coverage.stop()
            code_coverage.save()

        if false_count > 0:
            sys.exit(1)
        else:
            sys.exit(0)

    print_header(u' Overall Tests Report ', sep=u'=', centered=True, inline=True)

    no_problems_found = True
    for (name, results) in TEST_RESULTS:
        if not results.failures and not results.errors and not results.skipped:
            continue

        no_problems_found = False

        print_header(u'*** {0} '.format(name), sep=u'*', inline=True)
        if results.skipped:
            print_header(u' -------- Skipped Tests ', sep='-', inline=True)
            maxlen = len(max([tc.id() for (tc, reason) in results.skipped], key=len))
            fmt = u'   -> {0: <{maxlen}}  ->  {1}'
            for tc, reason in results.skipped:
                print(fmt.format(tc.id(), reason, maxlen=maxlen))
            print_header(u' ', sep='-', inline=True)

        if results.errors:
            print_header(u' -------- Tests with Errors ', sep='-', inline=True)
            for tc, reason in results.errors:
                print_header(u'   -> {0} '.format(tc.id()), sep=u'.', inline=True)
                for line in reason.rstrip().splitlines():
                    print('       {0}'.format(line.rstrip()))
                print_header(u'   ', sep=u'.', inline=True)
            print_header(u' ', sep='-', inline=True)

        if results.failures:
            print_header(u' -------- Failed Tests ', sep='-', inline=True)
            for tc, reason in results.failures:
                print_header(u'   -> {0} '.format(tc.id()), sep=u'.', inline=True)
                for line in reason.rstrip().splitlines():
                    print('       {0}'.format(line.rstrip()))
                print_header(u'   ', sep=u'.', inline=True)
            print_header(u' ', sep='-', inline=True)

    print_header(u'', sep=u'*', inline=True)

    if no_problems_found:
        print_header(
            u'*** No Problems Found While Running Tests ',
            sep=u'*', inline=True
        smax_open_files, hmax_open_files = resource.getrlimit(
            resource.RLIMIT_NOFILE
        )
        if smax_open_files < REQUIRED_OPEN_FILES:
            print('~' * PNUM)
            print(
                'Max open files setting is too low({0}) for running the '
                'tests'.format(smax_open_files)
            )
            print(
                'Trying to raise the limit to {0}'.format(REQUIRED_OPEN_FILES)
            )
            if hmax_open_files < 4096:
                hmax_open_files = 4096  # Decent default?
            try:
                resource.setrlimit(
                    resource.RLIMIT_NOFILE,
                    (REQUIRED_OPEN_FILES, hmax_open_files)
                )
            except Exception as err:
                print(
                    'ERROR: Failed to raise the max open files setting -> '
                    '{0}'.format(err)
                )
                print('Please issue the following command on your console:')
                print('  ulimit -n {0}'.format(REQUIRED_OPEN_FILES))
                self.exit()
            finally:
                print('~' * PNUM)

    print_header(' Overall Tests Report ', sep='=', centered=True, inline=True)
        print_header('Setting up Salt daemons to execute tests', top=False)
        status = []
        if not any([self.options.client, self.options.module,
                    self.options.runner, self.options.shell,
                    self.options.state, self.options.name]):
            return status
        with TestDaemon(self):
            if self.options.name:
                for name in self.options.name:
                    results = self.run_suite('', name)
                    status.append(results)
            if self.options.runner:
                status.append(self.run_integration_suite('runners', 'Runner'))
            if self.options.module:
                status.append(self.run_integration_suite('modules', 'Module'))
            if self.options.state:
                status.append(self.run_integration_suite('states', 'State'))
            if self.options.client:
                status.append(self.run_integration_suite('client', 'Client'))
            if self.options.shell:
                status.append(self.run_integration_suite('shell', 'Shell'))
        return status

    def run_unit_tests(self):
        '''
        Execute the unit tests
        '''
        if not self.options.unit:
            return [True]
        status = []
        if self.options.name:
            for name in self.options.name:
                results = self.run_suite(os.path.join(TEST_DIR, 'unit'), name)
                status.append(results)
        else:
            results = self.run_suite(
                os.path.join(TEST_DIR, 'unit'), 'Unit', '*_test.py'
            )
            status.append(results)
        return status

    def print_overall_testsuite_report(self):
        SaltTestingParser.print_overall_testsuite_report(self)
        if not self.options.coverage:
            return

    if opts.coverage:
        print('Stopping and saving coverage info')
        code_coverage.stop()
        code_coverage.save()
        print('Current Directory: {0}'.format(os.getcwd()))
        print('Coverage data file exists? {0}'.format(os.path.isfile('.coverage')))
        print(
            'Coverage data file exists? {0}'.format(
                os.path.isfile('.coverage')
            )
        )

        if opts.no_coverage_report is False:
            report_dir = os.path.join(os.path.dirname(__file__), 'coverage-report')
        if self.options.no_coverage_report is False:
            report_dir = os.path.join(
                os.path.dirname(__file__),
                'coverage-report'
            )
            print(
                '\nGenerating Coverage HTML Report Under {0!r} ...'.format(
                    report_dir
@@ -496,7 +312,37 @@ if __name__ == '__main__':
            code_coverage.html_report(directory=report_dir)
            print('Done.\n')

    def finalize(self, exit_code):
        if self.options.no_report:
            if self.options.coverage:
                code_coverage.stop()
                code_coverage.save()
        SaltTestingParser.finalize(self, exit_code)


def main():
    '''
    Parse command line options for running specific tests
    '''
    parser = SaltTestsuiteParser(
        TEST_DIR,
        xml_output_dir=XML_OUTPUT_DIR,
        html_output_dir=HTML_OUTPUT_DIR,
        tests_logfile=os.path.join(tempfile.gettempdir(), 'salt-runtests.log')
    )
    parser.parse_args()

    overall_status = []
    status = parser.run_integration_tests()
    overall_status.extend(status)
    status = parser.run_unit_tests()
    overall_status.extend(status)
    false_count = overall_status.count(False)

    if false_count > 0:
        sys.exit(1)
    else:
        sys.exit(0)
        parser.finalize(1)
    parser.finalize(0)


if __name__ == '__main__':
    main()

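Before the third file: the runner communicates the --run-destructive choice to individual tests through the DESTRUCTIVE_TESTS environment variable, and the destructiveTest decorator deleted below reads it back (salt-testing ships an equivalent after this change). A self-contained sketch of that handshake, with the decorator inlined:

    import os
    import unittest
    from functools import wraps


    def destructiveTest(func):
        # Inlined copy of the decorator removed from saltunittest below.
        @wraps(func)
        def wrap(cls):
            if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
                cls.skipTest('Destructive tests are disabled')
            return func(cls)
        return wrap


    # runtests.py exports the choice like this (False unless --run-destructive):
    os.environ['DESTRUCTIVE_TESTS'] = str(False)


    class UserTests(unittest.TestCase):
        @destructiveTest
        def test_add_user(self):
            pass  # would mutate the system, so it is skipped above


    if __name__ == '__main__':
        unittest.main()  # reports test_add_user as skipped
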
@@ -44,159 +44,3 @@ SALT_LIBS = os.path.dirname(TEST_DIR)
for dir_ in [TEST_DIR, SALT_LIBS]:
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)


def destructiveTest(func):
    @wraps(func)
    def wrap(cls):
        if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
            cls.skipTest('Destructive tests are disabled')
        return func(cls)
    return wrap


class RedirectStdStreams(object):
    """
    Temporarily redirect system output to file like objects.
    Default is to redirect to `os.devnull`, which just mutes output, `stdout`
    and `stderr`.
    """

    def __init__(self, stdout=None, stderr=None):
        if stdout is None:
            stdout = open(os.devnull, 'w')
        if stderr is None:
            stderr = open(os.devnull, 'w')

        self.__stdout = stdout
        self.__stderr = stderr
        self.__redirected = False

    def __enter__(self):
        self.redirect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.unredirect()

    def redirect(self):
        self.old_stdout = sys.stdout
        self.old_stdout.flush()
        self.old_stderr = sys.stderr
        self.old_stderr.flush()
        sys.stdout = self.__stdout
        sys.stderr = self.__stderr
        self.__redirected = True

    def unredirect(self):
        if not self.__redirected:
            return
        try:
            self.__stdout.flush()
            self.__stdout.close()
        except ValueError:
            # already closed?
            pass
        try:
            self.__stderr.flush()
            self.__stderr.close()
        except ValueError:
            # already closed?
            pass

        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr

    def flush(self):
        if self.__redirected:
            try:
                self.__stdout.flush()
            except:
                pass
            try:
                self.__stderr.flush()
            except:
                pass


class TestsLoggingHandler(object):
    '''
    Simple logging handler which can be used to test if certain logging
    messages get emitted or not::

    ..code-block: python

        with TestsLoggingHandler() as handler:
            # (...)              Do what ever you wish here
            handler.messages    # here are the emitted log messages

    '''
    def __init__(self, level=0, format='%(levelname)s:%(message)s'):
        self.level = level
        self.format = format
        self.activated = False
        self.prev_logging_level = None

    def activate(self):
        class Handler(logging.Handler):
            def __init__(self, level):
                logging.Handler.__init__(self, level)
                self.messages = []

            def emit(self, record):
                self.messages.append(self.format(record))

        self.handler = Handler(self.level)
        formatter = logging.Formatter(self.format)
        self.handler.setFormatter(formatter)
        logging.root.addHandler(self.handler)
        self.activated = True
        # Make sure we're running with the lowest logging level with our
        # tests logging handler
        current_logging_level = logging.root.getEffectiveLevel()
        if current_logging_level > logging.DEBUG:
            self.prev_logging_level = current_logging_level
            logging.root.setLevel(0)

    def deactivate(self):
        if not self.activated:
            return
        logging.root.removeHandler(self.handler)
        # Restore previous logging level if changed
        if self.prev_logging_level is not None:
            logging.root.setLevel(self.prev_logging_level)

    @property
    def messages(self):
        if not self.activated:
            return []
        return self.handler.messages

    def clear(self):
        self.handler.messages = []

    def __enter__(self):
        self.activate()
        return self

    def __exit__(self, type, value, traceback):
        self.deactivate()
        self.activated = False

    # Mimic some handler attributes and methods
    @property
    def lock(self):
        if self.activated:
            return self.handler.lock

    def createLock(self):
        if self.activated:
            return self.handler.createLock()

    def acquire(self):
        if self.activated:
            return self.handler.acquire()

    def release(self):
        if self.activated:
            return self.handler.release()
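The helpers deleted above are not gone: the first file already imports RedirectStdStreams from salttesting.helpers, so usage is unchanged after the move. A short sketch of the muting behaviour described in its docstring:

    from salttesting.helpers import RedirectStdStreams  # new home after this commit

    # Both streams default to os.devnull, so the noisy call is simply muted.
    with RedirectStdStreams():
        print('this goes nowhere')
    print('back on the real stdout')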