Add the new refactored salt logging.

It also handles multiprocessing logging.
This commit is contained in:
Pedro Algarvio 2021-06-28 12:07:24 +01:00 committed by Megan Wilhite
parent b22eb604d8
commit a9d5e75b52
28 changed files with 1813 additions and 793 deletions

View file

@ -132,5 +132,9 @@ __define_global_system_encoding_variable__()
# This is now garbage collectable
del __define_global_system_encoding_variable__
# Import Salt's logging machinery
import salt._logging.impl # isort:skip pylint: disable=unused-import
# Make sure Salt's logging tweaks are always present
# DO NOT MOVE THIS IMPORT
# pylint: disable=unused-import
import salt._logging # isort:skip
# pylint: enable=unused-import

View file

@ -9,5 +9,43 @@
The ``salt._logging`` package should be imported as soon as possible since salt tweaks
the python's logging system.
"""
from salt._logging.impl import * # pylint: disable=wildcard-import
from salt._logging.impl import (
DFLT_LOG_DATEFMT,
DFLT_LOG_DATEFMT_LOGFILE,
DFLT_LOG_FMT_CONSOLE,
DFLT_LOG_FMT_JID,
DFLT_LOG_FMT_LOGFILE,
LOG_COLORS,
LOG_LEVELS,
LOG_VALUES_TO_LEVELS,
SORTED_LEVEL_NAMES,
freeze_logging_options_dict,
get_console_handler,
get_extended_logging_handlers,
get_log_record_factory,
get_logfile_handler,
get_logging_level_from_string,
get_logging_options_dict,
get_lowest_log_level,
get_temp_handler,
in_mainprocess,
is_console_handler_configured,
is_extended_logging_configured,
is_logfile_handler_configured,
is_temp_handler_configured,
set_log_record_factory,
set_logging_options_dict,
set_lowest_log_level,
set_lowest_log_level_by_opts,
setup_console_handler,
setup_extended_logging,
setup_log_granular_levels,
setup_logfile_handler,
setup_logging,
setup_temp_handler,
shutdown_console_handler,
shutdown_extended_logging,
shutdown_logfile_handler,
shutdown_logging,
shutdown_temp_handler,
)

View file

@ -8,7 +8,7 @@
import copy
import logging
import logging.handlers
import queue
import queue as _queue
import sys
from collections import deque
@ -41,7 +41,6 @@ class TemporaryLoggingHandler(logging.NullHandler):
"'{name}.TemporaryLoggingHandler' will go away after "
"{{date}}.".format(name=__name__),
)
self.__max_queue_size = max_queue_size
super().__init__(level=level)
self.__messages = deque(maxlen=max_queue_size)
@ -73,6 +72,64 @@ class StreamHandler(ExcInfoOnLogLevelFormatMixin, logging.StreamHandler):
"""
class DeferredStreamHandler(StreamHandler):
    """
    A stream handler that buffers log records instead of emitting them.

    Records are stored up to ``max_queue_size``, after which the oldest
    stored records are dropped.  It should only be used as a temporary
    logging handler, while the logging system is not fully configured.

    Once configured, pass any logging handlers that should have received the
    initial log messages to :func:`DeferredStreamHandler.sync_with_handlers`
    and all stored log records will be dispatched to the provided handlers.

    If anything goes wrong before logging is properly set up, all stored
    messages will be flushed to the handler's stream, i.e. written to the
    console.

    .. versionadded:: 3005.0
    """

    def __init__(self, stream, max_queue_size=10000):
        super().__init__(stream)
        # Oldest records are silently dropped once the deque is full.
        self.__messages = deque(maxlen=max_queue_size)
        # Guards against re-entrant flushing (see flush()).
        self.__emitting = False

    def handle(self, record):
        """
        Queue *record* instead of emitting it.
        """
        self.acquire()
        try:
            self.__messages.append(record)
        finally:
            # Always release the handler lock, even if appending fails;
            # otherwise every subsequent logging call would deadlock on it.
            self.release()

    def flush(self):
        """
        Emit all stored records to this handler's stream.
        """
        if self.__emitting:
            # The stream handler calls flush() while emitting a log record.
            # Bail out here to avoid a RecursionError.
            return
        while self.__messages:
            try:
                self.__emitting = True
                record = self.__messages.popleft()
                # Call the parent class' handle() so the record is actually
                # emitted instead of being queued right back.
                super().handle(record)
            finally:
                self.__emitting = False
        super().flush()

    def sync_with_handlers(self, handlers=()):
        """
        Dispatch the stored log records to the provided log handlers.
        """
        while self.__messages:
            record = self.__messages.popleft()
            for handler in handlers:
                if handler is self:
                    continue
                handler.handle(record)
class FileHandler(ExcInfoOnLogLevelFormatMixin, logging.FileHandler):
"""
File handler which properly handles exc_info on a per handler basis
@ -185,7 +242,7 @@ if sys.version_info < (3, 7):
"""
try:
self.queue.put_nowait(record)
except queue.Full:
except _queue.Full:
sys.stderr.write(
"[WARNING ] Message queue is full, "
'unable to write "{}" to log.\n'.format(record)
@ -244,7 +301,7 @@ else:
"""
try:
self.queue.put_nowait(record)
except queue.Full:
except _queue.Full:
sys.stderr.write(
"[WARNING ] Message queue is full, "
'unable to write "{}" to log.\n'.format(record)

View file

@ -5,11 +5,16 @@
Salt's logging implementation classes/functionality
"""
import atexit
import logging
import multiprocessing
import os
import re
import socket
import sys
import traceback
import types
import urllib.parse
# Let's define these custom logging levels before importing the salt._logging.mixins
# since they will be used there
@ -18,15 +23,15 @@ TRACE = logging.TRACE = 5
GARBAGE = logging.GARBAGE = 1
QUIET = logging.QUIET = 1000
from salt._logging.handlers import DeferredStreamHandler # isort:skip
from salt._logging.handlers import RotatingFileHandler # isort:skip
from salt._logging.handlers import StreamHandler # isort:skip
# from salt._logging.handlers import SysLogHandler # isort:skip
# from salt._logging.handlers import RotatingFileHandler # isort:skip
# from salt._logging.handlers import WatchedFileHandler # isort:skip
from salt._logging.handlers import TemporaryLoggingHandler # isort:skip
from salt._logging.handlers import SysLogHandler # isort:skip
from salt._logging.handlers import WatchedFileHandler # isort:skip
from salt._logging.mixins import LoggingMixinMeta # isort:skip
from salt.exceptions import LoggingRuntimeError # isort:skip
from salt.utils.ctx import RequestContext # isort:skip
from salt.utils.immutabletypes import freeze, ImmutableDict # isort:skip
from salt.utils.textformat import TextFormat # isort:skip
LOG_LEVELS = {
@ -90,31 +95,12 @@ SORTED_LEVEL_NAMES = [l[0] for l in sorted(LOG_LEVELS.items(), key=lambda x: x[1
MODNAME_PATTERN = re.compile(r"(?P<name>%%\(name\)(?:\-(?P<digits>[\d]+))?s)")
# ----- REMOVE ME ON REFACTOR COMPLETE ------------------------------------------------------------------------------>
class __NullLoggingHandler(TemporaryLoggingHandler):
"""
This class exists just to better identify which temporary logging
handler is being used for what.
"""
class __StoreLoggingHandler(TemporaryLoggingHandler):
"""
This class exists just to better identify which temporary logging
handler is being used for what.
"""
# Store a reference to the temporary queue logging handler
LOGGING_NULL_HANDLER = __NullLoggingHandler(logging.WARNING)
# Store a reference to the temporary console logger
LOGGING_TEMP_HANDLER = StreamHandler(sys.stderr)
# Store a reference to the "storing" logging handler
LOGGING_STORE_HANDLER = __StoreLoggingHandler()
# <---- REMOVE ME ON REFACTOR COMPLETE -------------------------------------------------------------------------------
# Default logging formatting options
DFLT_LOG_FMT_JID = "[JID: %(jid)s]"
DFLT_LOG_DATEFMT = "%H:%M:%S"
DFLT_LOG_DATEFMT_LOGFILE = "%Y-%m-%d %H:%M:%S"
DFLT_LOG_FMT_CONSOLE = "[%(levelname)-8s] %(message)s"
DFLT_LOG_FMT_LOGFILE = "%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s"
class SaltLogRecord(logging.LogRecord):
@ -188,11 +174,7 @@ class SaltLoggingClass(LOGGING_LOGGER_CLASS, metaclass=LoggingMixinMeta):
max(list(logging.Logger.manager.loggerDict), key=len)
)
for handler in logging.root.handlers:
if handler in (
LOGGING_NULL_HANDLER,
LOGGING_STORE_HANDLER,
LOGGING_TEMP_HANDLER,
):
if handler == get_temp_handler():
continue
formatter = handler.formatter
@ -282,11 +264,7 @@ class SaltLoggingClass(LOGGING_LOGGER_CLASS, metaclass=LoggingMixinMeta):
else:
extra["exc_info_on_loglevel"] = exc_info_on_loglevel
if sys.version_info < (3,):
LOGGING_LOGGER_CLASS._log(
self, level, msg, args, exc_info=exc_info, extra=extra
)
elif sys.version_info < (3, 8):
if sys.version_info < (3, 8):
LOGGING_LOGGER_CLASS._log(
self,
level,
@ -381,6 +359,135 @@ class SaltLoggingClass(LOGGING_LOGGER_CLASS, metaclass=LoggingMixinMeta):
return logrecord
def get_logging_level_from_string(level):
    """
    Translate *level* into a numeric logging level.

    ``None`` maps to ``logging.WARNING``, integers pass through untouched,
    known level names are looked up case-insensitively, and any unknown
    string maps to ``logging.ERROR``.
    """
    if level is None:
        return logging.WARNING
    if isinstance(level, int):
        # Already numeric, nothing to translate.
        return level
    lookup = level.lower()
    if lookup in LOG_LEVELS:
        return LOG_LEVELS[lookup]
    if level:
        log.warning(
            "Could not translate the logging level string '%s' "
            "into an actual logging level integer. Returning "
            "'logging.ERROR'.",
            level,
        )
    # Couldn't translate the passed string into a logging level.
    return logging.ERROR
def get_logging_options_dict():
    """
    Return the logging options dictionary, or ``None`` when it has not been
    set yet.
    """
    return getattr(set_logging_options_dict, "__options_dict__", None)


def set_logging_options_dict(opts):
    """
    Store *opts* as the logging options dictionary derived from the loaded
    salt config.

    Raises ``RuntimeError`` when the stored options were already frozen.
    """
    try:
        current = set_logging_options_dict.__options_dict__
    except AttributeError:
        # Nothing stored yet, first call.
        pass
    else:
        if isinstance(current, ImmutableDict):
            raise RuntimeError(
                "The logging options have been frozen. They can no longer be changed."
            )
    set_logging_options_dict.__options_dict__ = opts


def freeze_logging_options_dict():
    """
    Replace the stored logging options dictionary with an immutable copy.
    """
    frozen_opts = freeze(set_logging_options_dict.__options_dict__)
    set_logging_options_dict.__options_dict__ = frozen_opts
def is_temp_handler_configured():
    """
    Return ``True`` when the temporary deferred stream handler is set up.
    """
    return get_temp_handler() is not None


def get_temp_handler():
    """
    Return the temporary deferred stream handler, or ``None`` when it is not
    set up.
    """
    return getattr(setup_temp_handler, "__handler__", None)


def setup_temp_handler(log_level=None):
    """
    Install a deferred stream handler writing to ``sys.stderr`` on the root
    logger.

    When a temporary handler is already installed, only its level is synced
    with *log_level*.
    """
    configured = get_temp_handler()
    if configured is not None:
        level = get_logging_level_from_string(log_level)
        if configured.level != level:
            configured.setLevel(level)
        return

    if log_level is None:
        log_level = logging.WARNING
    log_level = get_logging_level_from_string(log_level)

    handler = None
    for candidate in logging.root.handlers:
        if hasattr(candidate, "stream") and candidate.stream is sys.stderr:
            # There's already a logging handler outputting to sys.stderr;
            # reuse it instead of stacking another one.
            handler = candidate
            break
    if handler is None:
        handler = DeferredStreamHandler(sys.stderr)
        # Make sure buffered records still hit the console if the process
        # exits before logging is fully configured.
        atexit.register(handler.flush)
    handler.setLevel(log_level)

    # Apply the default temporary console formatter config.
    handler.setFormatter(
        logging.Formatter(DFLT_LOG_FMT_CONSOLE, datefmt=DFLT_LOG_DATEFMT)
    )
    logging.root.addHandler(handler)

    setup_temp_handler.__handler__ = handler


def shutdown_temp_handler():
    """
    Close and remove the temporary deferred stream handler, if one is set.
    """
    temp_handler = get_temp_handler()
    if temp_handler is None:
        return
    for installed in logging.root.handlers[:]:
        if installed is temp_handler:
            installed.close()
            break
    # Drop the reference so the handler can be garbage collected.
    setup_temp_handler.__handler__ = None
# Override the python's logging logger class as soon as this module is imported
if logging.getLoggerClass() is not SaltLoggingClass:
@ -401,21 +508,15 @@ if logging.getLoggerClass() is not SaltLoggingClass:
# add a verbose method which is what pip expects
SaltLoggingClass.verbose = SaltLoggingClass.debug
# ----- REMOVE ON REFACTORING COMPLETE -------------------------------------------------------------------------->
if not logging.root.handlers:
# No configuration to the logging system has been done so far.
# Set the root logger at the lowest level possible
logging.root.setLevel(GARBAGE)
# Add a Null logging handler until logging is configured(will be
# removed at a later stage) so we stop getting:
# Add a permanent null handler so that we never get messages like:
# No handlers could be found for logger 'foo'
logging.root.addHandler(LOGGING_NULL_HANDLER)
# Add the queue logging handler so we can later sync all message records
# with the additional logging handlers
logging.root.addHandler(LOGGING_STORE_HANDLER)
# <---- REMOVE ON REFACTORING COMPLETE ---------------------------------------------------------------------------
setup_temp_handler()
logging.root.addHandler(get_temp_handler())
# Now that we defined the default logging logger class, we can instantiate our logger
@ -423,32 +524,485 @@ if logging.getLoggerClass() is not SaltLoggingClass:
log = logging.getLogger(__name__)
def __get_exposed_module_attributes():
def get_console_handler():
"""
This function just ``dir()``'s this module and filters out any functions
or variables which should not be available when wildcard importing it
Get the console stream handler
"""
exposed = []
module = sys.modules[__name__]
for name in dir(module):
if name.startswith("_"):
continue
obj = getattr(module, name)
if not isinstance(obj, types.FunctionType):
if name.startswith(("LOG_", "SORTED_")):
exposed.append(name)
continue
if obj.__module__ != __name__:
continue
exposed.append(name)
return exposed
try:
return setup_console_handler.__handler__
except AttributeError:
return
# Define what can be imported by wildcard imports
__all__ = __get_exposed_module_attributes()
def is_console_handler_configured():
"""
Is the console stream handler configured
"""
return get_console_handler() is not None
# We're done with the function, nuke it
del __get_exposed_module_attributes
def shutdown_console_handler():
    """
    Close the console stream handler and drop the stored reference.

    A no-op when the console handler was never configured.
    """
    handler = get_console_handler()
    if handler is None:
        return
    handler.close()
    setup_console_handler.__handler__ = None
def setup_console_handler(log_level=None, log_format=None, date_format=None):
    """
    Configure a stream handler writing to ``sys.stderr`` on the root logger.

    A no-op (with a warning) when console logging is already configured.
    An existing root handler attached to ``sys.stderr`` is reused instead of
    adding a duplicate.
    """
    if is_console_handler_configured():
        log.warning("Console logging already configured")
        return

    log.trace(
        "Setting up console logging: %s",
        dict(log_level=log_level, log_format=log_format, date_format=date_format),
    )

    level = get_logging_level_from_string(
        logging.WARNING if log_level is None else log_level
    )

    set_log_record_factory(SaltColorLogRecord)

    handler = None
    temp_handler = get_temp_handler()
    for candidate in logging.root.handlers:
        if candidate == temp_handler:
            # The temporary handler doesn't count as a console handler.
            continue
        if hasattr(candidate, "stream") and candidate.stream is sys.stderr:
            # There's already a logging handler outputting to sys.stderr.
            handler = candidate
            break
    if handler is None:
        handler = StreamHandler(sys.stderr)
    handler.setLevel(level)

    # Fall back to the default console formatter config.
    formatter = logging.Formatter(
        log_format or DFLT_LOG_FMT_CONSOLE,
        datefmt=date_format or DFLT_LOG_DATEFMT,
    )
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)

    setup_console_handler.__handler__ = handler
def get_logfile_handler():
    """
    Return the log file handler, or ``None`` when it is not set up.
    """
    return getattr(setup_logfile_handler, "__handler__", None)


def is_logfile_handler_configured():
    """
    Return ``True`` when the log file handler is set up.
    """
    return get_logfile_handler() is not None


def shutdown_logfile_handler():
    """
    Close the log file handler and drop the stored reference.

    A no-op when the log file handler was never configured.
    """
    handler = get_logfile_handler()
    if handler is None:
        return
    handler.close()
    setup_logfile_handler.__handler__ = None
def setup_logfile_handler(
    log_path,
    log_level=None,
    log_format=None,
    date_format=None,
    max_bytes=0,
    backup_count=0,
    user=None,
):
    """
    Setup the log file handler.

    *log_path* is either a plain filesystem path (file-based logging) or a
    syslog URL. Since version 0.10.6 we support logging to syslog, some
    examples:

        tcp://localhost:514/LOG_USER
        tcp://localhost/LOG_DAEMON
        udp://localhost:5145/LOG_KERN
        udp://localhost
        file:///dev/log
        file:///dev/log/LOG_SYSLOG
        file:///dev/log/LOG_DAEMON

    The above examples are self explanatory, but:

        <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>

    Thinking on doing remote logging you might also be thinking that
    you could point Salt's logging to the remote syslog. **Please Don't!**
    An issue has been reported when doing this over TCP where the logged lines
    get concatenated. See #3061.

    The preferred way to do remote logging is setup a local syslog, point
    Salt's logging to the local syslog(unix socket is much faster) and then
    have the local syslog forward the log messages to the remote syslog.
    """
    if is_logfile_handler_configured():
        log.warning("Logfile logging already configured")
        return

    log.trace(
        "Setting up log file logging: %s",
        dict(
            log_path=log_path,
            log_level=log_level,
            log_format=log_format,
            date_format=date_format,
            max_bytes=max_bytes,
            backup_count=backup_count,
            user=user,
        ),
    )

    if log_path is None:
        log.warning("log_path setting is set to `None`. Nothing else to do")
        return

    if log_level is None:
        log_level = logging.WARNING

    log_level = get_logging_level_from_string(log_level)

    parsed_log_path = urllib.parse.urlparse(log_path)

    if parsed_log_path.scheme in ("tcp", "udp", "file"):
        # Syslog-based logging; start from the syslog defaults and refine
        # them from the parsed URL below.
        syslog_opts = {
            "facility": SysLogHandler.LOG_USER,
            "socktype": socket.SOCK_DGRAM,
        }

        if parsed_log_path.scheme == "file" and parsed_log_path.path:
            facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
            if not facility_name.startswith("LOG_"):
                # The user is not specifying a syslog facility
                facility_name = "LOG_USER"  # Syslog default
                syslog_opts["address"] = parsed_log_path.path
            else:
                # The user has set a syslog facility, let's update the path to
                # the logging socket
                syslog_opts["address"] = os.sep.join(
                    parsed_log_path.path.split(os.sep)[:-1]
                )
        elif parsed_log_path.path:
            # In case of udp or tcp with a facility specified
            facility_name = parsed_log_path.path.lstrip(os.sep).upper()
            if not facility_name.startswith("LOG_"):
                # Logging facilities start with LOG_ if this is not the case
                # fail right now!
                raise LoggingRuntimeError(
                    "The syslog facility '{}' is not known".format(facility_name)
                )
        else:
            # This is the case of udp or tcp without a facility specified
            facility_name = "LOG_USER"  # Syslog default

        facility = getattr(SysLogHandler, facility_name, None)
        if facility is None:
            # This python syslog version does not know about the user provided
            # facility name
            raise LoggingRuntimeError(
                "The syslog facility '{}' is not known".format(facility_name)
            )
        syslog_opts["facility"] = facility

        if parsed_log_path.scheme == "tcp":
            syslog_opts["socktype"] = socket.SOCK_STREAM

        if parsed_log_path.scheme in ("tcp", "udp"):
            syslog_opts["address"] = (
                parsed_log_path.hostname,
                parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT,
            )

        if parsed_log_path.scheme == "file":
            # Unix-socket syslog does not take a socktype argument.
            syslog_opts.pop("socktype", None)

        try:
            # Et voilà! Finally our syslog handler instance
            handler = SysLogHandler(**syslog_opts)
        except OSError as err:
            log.error("Failed to setup the Syslog logging handler: %s", err)
            sys.exit(2)
    else:
        # make sure, the logging directory exists and attempt to create it if necessary
        if user is None:
            import salt.utils.user

            user = salt.utils.user.get_user()

        import salt.utils.files
        import salt.utils.verify

        # Logfile is not using Syslog, verify
        with salt.utils.files.set_umask(0o027):
            salt.utils.verify.verify_log_files([log_path], user)
        try:
            # Logfile logging is UTF-8 on purpose.
            # Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a
            # user is not using plain ASCII, their system should be ready to
            # handle UTF-8.
            if max_bytes > 0:
                handler = RotatingFileHandler(
                    log_path,
                    mode="a",
                    maxBytes=max_bytes,
                    backupCount=backup_count,
                    encoding="utf-8",
                    delay=0,
                )
            else:
                handler = WatchedFileHandler(
                    log_path, mode="a", encoding="utf-8", delay=0
                )
        except OSError:
            log.warning(
                "Failed to open log file, do you have permission to write to %s?",
                log_path,
            )
            # Do not proceed with any more configuration since it will fail, we
            # have the console logging already setup and the user should see
            # the error.
            return

    handler.setLevel(log_level)

    # Fall back to the default logfile formatter config.
    if not log_format:
        log_format = DFLT_LOG_FMT_LOGFILE
    if not date_format:
        date_format = DFLT_LOG_DATEFMT_LOGFILE
    formatter = logging.Formatter(log_format, datefmt=date_format)

    handler.setFormatter(formatter)
    logging.root.addHandler(handler)

    setup_logfile_handler.__handler__ = handler
def get_extended_logging_handlers():
    """
    Return the list of extended logging handlers, or ``None`` when they are
    not set up.
    """
    return getattr(setup_extended_logging, "__handlers__", None)


def is_extended_logging_configured():
    """
    Return ``True`` when the extended logging handlers are set up.
    """
    return get_extended_logging_handlers() is not None


def shutdown_extended_logging():
    """
    Close every extended logging handler and drop the stored references.
    """
    handlers = get_extended_logging_handlers()
    if handlers:
        for handler in handlers:
            handler.close()
        setup_extended_logging.__handlers__ = None
def setup_extended_logging(opts):
    """
    Setup the extended logging handlers, internal or external.

    Loads the ``log_handlers`` providers through Salt's loader with the
    passed *opts*, installs every handler they return on the root logger and
    keeps track of them so they can be shut down later.
    """
    if is_extended_logging_configured() is True:
        # Don't re-configure external loggers
        return

    # Explicit late import of Salt's loader
    import salt.loader

    # Be sure to always shutdown extended logging on process termination
    atexit.register(shutdown_extended_logging)

    # Let's keep a reference to the current logging handlers
    initial_handlers = logging.root.handlers[:]

    # Load any additional logging handlers
    providers = salt.loader.log_handlers(opts)

    # Let's keep track of the new logging handlers so we can sync the stored
    # log records with them
    additional_handlers = []

    for name, get_handlers_func in providers.items():
        log.info("Processing 'log_handlers.%s'", name)
        # Keep a reference to the logging handlers count before getting the
        # possible additional ones.
        initial_handlers_count = len(logging.root.handlers)

        handlers = get_handlers_func()
        if isinstance(handlers, types.GeneratorType):
            # Materialize generator-returning providers.
            handlers = list(handlers)
        elif handlers is False or handlers == [False]:
            # A false return value means not configuring any logging handler on
            # purpose
            log.info(
                "The `log_handlers.%s.setup_handlers()` function returned "
                "`False` which means no logging handler was configured on "
                "purpose. Continuing...",
                name,
            )
            continue
        else:
            # Make sure we have an iterable
            handlers = [handlers]

        for handler in handlers:
            if not handler and len(logging.root.handlers) == initial_handlers_count:
                log.info(
                    "The `log_handlers.%s`, did not return any handlers and the "
                    "global handlers count did not increase. This could be a sign "
                    "that `log_handlers.%s` is not working as expected.",
                    name,
                    name,
                )
                continue

            log.debug("Adding the '%s' provided logging handler: '%s'", name, handler)
            additional_handlers.append(handler)
            logging.root.addHandler(handler)

    # Pick up any handlers a provider attached to the root logger directly
    # instead of returning them.
    for handler in logging.root.handlers:
        if handler in initial_handlers:
            continue
        additional_handlers.append(handler)

    setup_extended_logging.__handlers__ = additional_handlers
def setup_log_granular_levels(log_granular_levels):
    """
    Set explicit logging levels on individual loggers.

    *log_granular_levels* maps logger names to level names or numeric
    values, e.g. ``{"salt.transport": "debug"}``.
    """
    for logger_name, level in log_granular_levels.items():
        granular_logger = logging.getLogger(logger_name)
        granular_logger.setLevel(get_logging_level_from_string(level))
def setup_logging():
    """
    Configure console, logfile, extended and granular logging from the
    stored logging options dictionary.

    Raises:
        RuntimeError: when ``set_logging_options_dict()`` was never called.
    """
    opts = get_logging_options_dict()
    if not opts:
        raise RuntimeError("The logging options have not been set yet.")

    want_console = opts.get("configure_console_logger", True)
    if want_console and not is_console_handler_configured():
        setup_console_handler(
            log_level=opts["log_level"],
            log_format=opts["log_fmt_console"],
            date_format=opts["log_datefmt"],
        )

    want_logfile = opts.get("configure_file_logger", True)
    if want_logfile and not is_logfile_handler_configured():
        log_file_level = opts["log_level_logfile"] or opts["log_level"]
        if log_file_level == "quiet":
            # "quiet" disables file logging entirely; park a NullHandler so
            # the logfile handler still registers as configured.
            setup_logfile_handler.__handler__ = logging.NullHandler()
        else:
            log_file_key = opts.get("log_file_key") or "log_file"
            setup_logfile_handler(
                log_path=opts[log_file_key],
                log_level=log_file_level,
                log_format=opts["log_fmt_logfile"],
                date_format=opts["log_datefmt_logfile"],
                max_bytes=opts["log_rotate_max_bytes"],
                backup_count=opts["log_rotate_backup_count"],
                user=opts["user"],
            )

    want_extended = opts.get("configure_ext_handlers", True)
    if want_extended and not is_extended_logging_configured():
        setup_extended_logging(opts)

    if opts.get("configure_granular_levels", True):
        setup_log_granular_levels(opts["log_granular_levels"])
def shutdown_logging():
    """
    Tear down every configured logging handler: extended handlers first,
    then the logfile handler, then the console handler.
    """
    for is_configured, shutdown in (
        (is_extended_logging_configured, shutdown_extended_logging),
        (is_logfile_handler_configured, shutdown_logfile_handler),
        (is_console_handler_configured, shutdown_console_handler),
    ):
        if is_configured():
            shutdown()
def in_mainprocess():
    """
    Return ``True`` when running in the main process.

    The main process' PID is cached on the first successful check so that
    subsequent calls are a cheap PID comparison.
    """
    cached_pid = getattr(in_mainprocess, "__pid__", None)
    if cached_pid is not None:
        return cached_pid == os.getpid()
    if multiprocessing.current_process().name != "MainProcess":
        return False
    in_mainprocess.__pid__ = os.getpid()
    return True
def get_lowest_log_level():
    """
    Return the lowest log level stored so far, or ``None`` when unset.
    """
    return getattr(set_lowest_log_level, "__log_level__", None)


def set_lowest_log_level(log_level):
    """
    Remember *log_level* as the lowest log level.
    """
    set_lowest_log_level.__log_level__ = log_level
def set_lowest_log_level_by_opts(opts):
    """
    Derive the lowest log level from the passed config and store it.

    Considers ``log_level``, ``log_level_logfile`` and every level in
    ``log_granular_levels``.
    """
    candidates = [
        get_logging_level_from_string(opts.get("log_level")),
        get_logging_level_from_string(opts.get("log_level_logfile")),
    ]
    candidates.extend(
        get_logging_level_from_string(value)
        for value in opts.get("log_granular_levels", {}).values()
    )
    set_lowest_log_level.__log_level__ = min(candidates)
def __global_logging_exception_handler(
@ -467,7 +1021,7 @@ def __global_logging_exception_handler(
return
# Log the exception
msg = "An un-handled exception was caught by salt's global exception handler:"
msg = "An un-handled exception was caught by Salt's global exception handler:"
try:
msg = "{}\n{}: {}\n{}".format(
msg,

View file

@ -9,6 +9,10 @@ import logging
import sys
import weakref
import salt._logging
log = logging.getLogger(__name__)
class LoggingProfileMixin:
"""
@ -138,42 +142,24 @@ class MultiprocessingStateMixin:
# __setstate__ and __getstate__ are only used on spawning platforms.
def __setstate__(self, state):
# Deferred to avoid circular imports
import salt.log.setup
logging_config = state["logging_config"]
if not salt._logging.get_logging_options_dict():
salt._logging.set_logging_options_dict(logging_config)
# If __setstate__ is getting called it means this is running on a
# new process. Setup logging.
# Setup logging on the new process
try:
salt.log.setup.set_multiprocessing_logging_queue(state["log_queue"])
except Exception: # pylint: disable=broad-except
logging.getLogger(__name__).exception(
"Failed to run salt.log.setup.set_multiprocessing_logging_queue() on %s",
salt._logging.setup_logging()
except Exception as exc: # pylint: disable=broad-except
log.exception(
"Failed to configure logging on %s: %s",
self,
exc,
)
try:
salt.log.setup.set_multiprocessing_logging_level(state["log_queue_level"])
except Exception: # pylint: disable=broad-except
logging.getLogger(__name__).exception(
"Failed to run salt.log.setup.set_multiprocessing_logging_level() on %s",
self,
)
try:
salt.log.setup.setup_multiprocessing_logging(state["log_queue"])
except Exception: # pylint: disable=broad-except
logging.getLogger(__name__).exception(
"Failed to run salt.log.setup.setup_multiprocessing_logging() on %s",
self,
)
weakref.finalize(self, salt.log.setup.shutdown_multiprocessing_logging)
# Be sure to shutdown logging when terminating the process
weakref.finalize(self, salt._logging.shutdown_logging)
def __getstate__(self):
# Deferred to avoid circular imports
import salt.log.setup
# Grab the current multiprocessing logging settings
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
log_queue_level = salt.log.setup.get_multiprocessing_logging_level()
# Grab the current logging settings
return {
"log_queue": log_queue,
"log_queue_level": log_queue_level,
"logging_config": salt._logging.get_logging_options_dict(),
}

View file

@ -28,6 +28,13 @@ import salt.utils.validate.path
import salt.utils.xdg
import salt.utils.yaml
import salt.utils.zeromq
from salt._logging import (
DFLT_LOG_DATEFMT,
DFLT_LOG_DATEFMT_LOGFILE,
DFLT_LOG_FMT_CONSOLE,
DFLT_LOG_FMT_JID,
DFLT_LOG_FMT_LOGFILE,
)
try:
import psutil
@ -40,14 +47,6 @@ except ImportError:
log = logging.getLogger(__name__)
_DFLT_LOG_DATEFMT = "%H:%M:%S"
_DFLT_LOG_DATEFMT_LOGFILE = "%Y-%m-%d %H:%M:%S"
_DFLT_LOG_FMT_CONSOLE = "[%(levelname)-8s] %(message)s"
_DFLT_LOG_FMT_LOGFILE = (
"%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d]"
" %(message)s"
)
_DFLT_LOG_FMT_JID = "[JID: %(jid)s]"
_DFLT_REFSPECS = ["+refs/heads/*:refs/remotes/origin/*", "+refs/tags/*:refs/tags/*"]
DEFAULT_INTERVAL = 60
@ -1144,11 +1143,11 @@ DEFAULT_MINION_OPTS = immutabletypes.freeze(
"log_file": os.path.join(salt.syspaths.LOGS_DIR, "minion"),
"log_level": "warning",
"log_level_logfile": None,
"log_datefmt": _DFLT_LOG_DATEFMT,
"log_datefmt_logfile": _DFLT_LOG_DATEFMT_LOGFILE,
"log_fmt_console": _DFLT_LOG_FMT_CONSOLE,
"log_fmt_logfile": _DFLT_LOG_FMT_LOGFILE,
"log_fmt_jid": _DFLT_LOG_FMT_JID,
"log_datefmt": DFLT_LOG_DATEFMT,
"log_datefmt_logfile": DFLT_LOG_DATEFMT_LOGFILE,
"log_fmt_console": DFLT_LOG_FMT_CONSOLE,
"log_fmt_logfile": DFLT_LOG_FMT_LOGFILE,
"log_fmt_jid": DFLT_LOG_FMT_JID,
"log_granular_levels": {},
"log_rotate_max_bytes": 0,
"log_rotate_backup_count": 0,
@ -1471,11 +1470,11 @@ DEFAULT_MASTER_OPTS = immutabletypes.freeze(
"log_file": os.path.join(salt.syspaths.LOGS_DIR, "master"),
"log_level": "warning",
"log_level_logfile": None,
"log_datefmt": _DFLT_LOG_DATEFMT,
"log_datefmt_logfile": _DFLT_LOG_DATEFMT_LOGFILE,
"log_fmt_console": _DFLT_LOG_FMT_CONSOLE,
"log_fmt_logfile": _DFLT_LOG_FMT_LOGFILE,
"log_fmt_jid": _DFLT_LOG_FMT_JID,
"log_datefmt": DFLT_LOG_DATEFMT,
"log_datefmt_logfile": DFLT_LOG_DATEFMT_LOGFILE,
"log_fmt_console": DFLT_LOG_FMT_CONSOLE,
"log_fmt_logfile": DFLT_LOG_FMT_LOGFILE,
"log_fmt_jid": DFLT_LOG_FMT_JID,
"log_granular_levels": {},
"log_rotate_max_bytes": 0,
"log_rotate_backup_count": 0,
@ -1662,11 +1661,11 @@ DEFAULT_CLOUD_OPTS = immutabletypes.freeze(
"log_file": os.path.join(salt.syspaths.LOGS_DIR, "cloud"),
"log_level": "warning",
"log_level_logfile": None,
"log_datefmt": _DFLT_LOG_DATEFMT,
"log_datefmt_logfile": _DFLT_LOG_DATEFMT_LOGFILE,
"log_fmt_console": _DFLT_LOG_FMT_CONSOLE,
"log_fmt_logfile": _DFLT_LOG_FMT_LOGFILE,
"log_fmt_jid": _DFLT_LOG_FMT_JID,
"log_datefmt": DFLT_LOG_DATEFMT,
"log_datefmt_logfile": DFLT_LOG_DATEFMT_LOGFILE,
"log_fmt_console": DFLT_LOG_FMT_CONSOLE,
"log_fmt_logfile": DFLT_LOG_FMT_LOGFILE,
"log_fmt_jid": DFLT_LOG_FMT_JID,
"log_granular_levels": {},
"log_rotate_max_bytes": 0,
"log_rotate_backup_count": 0,

View file

@ -10,7 +10,7 @@
"""
# Import several classes/functions from salt.log.setup for backwards compatibility
from salt._logging.impl import LOG_LEVELS, SORTED_LEVEL_NAMES
from salt._logging import LOG_LEVELS, SORTED_LEVEL_NAMES
from salt.log.setup import (
is_console_configured,
is_logfile_configured,

View file

@ -78,11 +78,10 @@ import logging.handlers
import socket
import threading
import time
import types
import salt.utils.msgpack
import salt.utils.network
from salt.log.setup import LOG_LEVELS
from salt._logging import LOG_LEVELS
log = logging.getLogger(__name__)
@ -233,7 +232,7 @@ class MessageFormatter(logging.Formatter):
# These are already handled above or explicitly pruned.
continue
if isinstance(value, (str, bool, dict, float, int, list, types.NoneType)):
if value is None or isinstance(value, (str, bool, dict, float, int, list)):
val = value
else:
val = repr(value)
@ -282,7 +281,7 @@ class MessageFormatter(logging.Formatter):
# These are already handled above or explicitly avoided.
continue
if isinstance(value, (str, bool, dict, float, int, list, types.NoneType)):
if value is None or isinstance(value, (str, bool, dict, float, int, list)):
val = value
else:
val = repr(value)

View file

@ -37,7 +37,7 @@
import logging
import socket
from salt.log.setup import LOG_LEVELS
from salt._logging import LOG_LEVELS
try:
from log4mongo.handlers import MongoHandler, MongoFormatter

View file

@ -163,7 +163,7 @@ import os
import salt.utils.json
import salt.utils.network
import salt.utils.stringutils
from salt.log.setup import LOG_LEVELS
from salt._logging import LOG_LEVELS
try:
import zmq

View file

@ -89,7 +89,7 @@ import logging
import re
import salt.loader
from salt.log import LOG_LEVELS
from salt._logging import LOG_LEVELS
try:
import raven
@ -100,8 +100,6 @@ except ImportError:
HAS_RAVEN = False
log = logging.getLogger(__name__)
__grains__ = {}
__salt__ = {}
# Define the module's virtual name
__virtualname__ = "sentry"

View file

@ -11,6 +11,7 @@
module salt or any salt depending library imports so any new logging
logger instance uses our ``salt.log.setup.SaltLoggingClass``.
"""
# pylint: disable=unused-import
import logging
import logging.handlers
@ -22,13 +23,6 @@ import time
import types
import urllib.parse
# pylint: disable=unused-import
from salt._logging import (
LOG_COLORS,
LOG_LEVELS,
LOG_VALUES_TO_LEVELS,
SORTED_LEVEL_NAMES,
)
from salt._logging.handlers import (
FileHandler,
QueueHandler,
@ -38,17 +32,16 @@ from salt._logging.handlers import (
WatchedFileHandler,
)
from salt._logging.impl import (
LOGGING_NULL_HANDLER,
LOGGING_STORE_HANDLER,
LOGGING_TEMP_HANDLER,
LOG_COLORS,
LOG_LEVELS,
LOG_VALUES_TO_LEVELS,
SORTED_LEVEL_NAMES,
SaltColorLogRecord,
SaltLogRecord,
)
from salt._logging.impl import set_log_record_factory as setLogRecordFactory
from salt.utils.versions import warn_until_date
# pylint: enable=unused-import
warn_until_date(
"20240101",
"Please stop using '{name}' and instead use 'salt._logging'. "
@ -107,6 +100,16 @@ class SaltLogQueueHandler(QueueHandler):
"""
# Store a reference to the temporary queue logging handler
LOGGING_NULL_HANDLER = None
# Store a reference to the temporary console logger
LOGGING_TEMP_HANDLER = None
# Store a reference to the "storing" logging handler
LOGGING_STORE_HANDLER = None
def getLogger(name): # pylint: disable=C0103
"""
This function is just a helper, an alias to:

View file

@ -797,6 +797,9 @@ class ConnectedCache(Process):
def ping_all_connected_minions(opts):
"""
Ping all connected minions.
"""
if opts["minion_data_cache"]:
tgt = list(salt.utils.minions.CkMinions(opts).connected_ids())
form = "list"
@ -808,6 +811,9 @@ def ping_all_connected_minions(opts):
def get_master_key(key_user, opts, skip_perm_errors=False):
"""
Return the master key.
"""
if key_user == "root":
if opts.get("user", "root") != "root":
key_user = opts.get("user", "root")

View file

@ -10,7 +10,7 @@
# pylint: disable=missing-docstring,protected-access,too-many-ancestors,too-few-public-methods
# pylint: disable=attribute-defined-outside-init,no-self-use
import copy
import getpass
import logging
import optparse
@ -21,11 +21,11 @@ import traceback
import types
from functools import partial
import salt._logging
import salt.config as config
import salt.defaults.exitcodes
import salt.exceptions
import salt.features
import salt.log.setup as log
import salt.syspaths as syspaths
import salt.utils.args
import salt.utils.data
@ -41,9 +41,9 @@ import salt.utils.yaml
import salt.version as version
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.validate.path import is_writeable
from salt.utils.verify import verify_log_files
from salt.utils.verify import verify_log, verify_log_files
logger = logging.getLogger(__name__)
log = logging.getLogger(__name__)
def _sorted(mixins_or_funcs):
@ -137,10 +137,8 @@ class OptionParser(optparse.OptionParser):
description = None
# Private attributes
_mixin_prio_ = 100
# Setup multiprocessing logging queue listener
_setup_mp_logging_listener_ = False
# We want this class order to be right before LogLevelMixIn
_mixin_prio_ = sys.maxsize - 200
def __init__(self, *args, **kwargs):
kwargs.setdefault("version", "%prog {}".format(self.VERSION))
@ -182,7 +180,9 @@ class OptionParser(optparse.OptionParser):
# This logging handler will be removed once the proper console or
# logfile logging is setup.
temp_log_level = getattr(self.options, "log_level", None)
log.setup_temp_logger("error" if temp_log_level is None else temp_log_level)
salt._logging.setup_temp_handler(
"error" if temp_log_level is None else temp_log_level
)
# Gather and run the process_<option> functions in the proper order
process_option_funcs = []
@ -192,10 +192,11 @@ class OptionParser(optparse.OptionParser):
process_option_funcs.append(process_option_func)
for process_option_func in _sorted(process_option_funcs):
log.trace("Processing %s", process_option_func)
try:
process_option_func()
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
log.exception(err)
self.error(
"Error while processing {}: {}".format(
process_option_func, traceback.format_exc()
@ -206,10 +207,11 @@ class OptionParser(optparse.OptionParser):
for (
mixin_after_parsed_func
) in self._mixin_after_parsed_funcs: # pylint: disable=no-member
log.trace("Processing %s", mixin_after_parsed_func)
try:
mixin_after_parsed_func(self)
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
log.exception(err)
self.error(
"Error while processing {}: {}".format(
mixin_after_parsed_func, traceback.format_exc()
@ -217,7 +219,7 @@ class OptionParser(optparse.OptionParser):
)
if self.config.get("conf_file", None) is not None: # pylint: disable=no-member
logger.debug(
log.debug(
"Configuration file path: %s",
self.config["conf_file"], # pylint: disable=no-member
)
@ -231,6 +233,7 @@ class OptionParser(optparse.OptionParser):
self, option_list, add_help=add_help
)
for mixin_setup_func in self._mixin_setup_funcs: # pylint: disable=no-member
log.trace("Processing %s", mixin_setup_func)
mixin_setup_func(self)
def _add_version_option(self):
@ -250,23 +253,26 @@ class OptionParser(optparse.OptionParser):
def exit(self, status=0, msg=None):
# Run the functions on self._mixin_after_parsed_funcs
for (
mixin_before_exit_func
) in self._mixin_before_exit_funcs: # pylint: disable=no-member
log.trace("Processing %s", mixin_before_exit_func)
try:
mixin_before_exit_func(self)
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
logger.error(
log.exception(err)
log.error(
"Error while processing %s: %s",
str(mixin_before_exit_func),
mixin_before_exit_func,
traceback.format_exc(),
exc_info_on_loglevel=logging.DEBUG,
)
if self._setup_mp_logging_listener_ is True:
# Stop logging through the queue
log.shutdown_multiprocessing_logging()
# Stop the logging queue listener process
log.shutdown_multiprocessing_logging_listener(daemonizing=True)
# In case we never got logging properly set up
temp_log_handler = salt._logging.get_temp_handler()
if temp_log_handler is not None:
temp_log_handler.flush()
salt._logging.shutdown_temp_handler()
if isinstance(msg, str) and msg and msg[-1] != "\n":
msg = "{}\n".format(msg)
optparse.OptionParser.exit(self, status, msg)
@ -293,6 +299,7 @@ class MergeConfigMixIn(metaclass=MixInMeta):
This mix-in should run last.
"""
# We want this class order to be the last one
_mixin_prio_ = sys.maxsize
def _mixin_setup(self):
@ -407,7 +414,7 @@ class SaltfileMixIn(metaclass=MixInMeta):
self.options.saltfile = os.path.abspath(self.options.saltfile)
# Make sure we let the user know that we will be loading a Saltfile
logger.info("Loading Saltfile from '%s'", str(self.options.saltfile))
log.info("Loading Saltfile from '%s'", self.options.saltfile)
try:
saltfile_config = config._read_conf_file(saltfile)
@ -534,7 +541,7 @@ class ConfigDirMixIn(metaclass=MixInMeta):
config_dir = os.environ.get(self._default_config_dir_env_var_, None)
if not config_dir:
config_dir = self._default_config_dir_
logger.debug("SYSPATHS setup as: %s", str(syspaths.CONFIG_DIR))
log.debug("SYSPATHS setup as: %s", syspaths.CONFIG_DIR)
self.add_option(
"-c",
"--config-dir",
@ -570,7 +577,9 @@ class ConfigDirMixIn(metaclass=MixInMeta):
class LogLevelMixIn(metaclass=MixInMeta):
_mixin_prio_ = 10
# We want this class order to be right before MergeConfigMixIn
_mixin_prio_ = sys.maxsize - 100
_default_logging_level_ = "warning"
_default_logging_logfile_ = None
_logfile_config_setting_name_ = "log_file"
@ -578,7 +587,7 @@ class LogLevelMixIn(metaclass=MixInMeta):
_logfile_loglevel_config_setting_name_ = (
"log_level_logfile" # pylint: disable=invalid-name
)
_skip_console_logging_config_ = False
_console_log_level_cli_flags = ("-l", "--log-level")
def _mixin_setup(self):
if self._default_logging_logfile_ is None:
@ -597,17 +606,15 @@ class LogLevelMixIn(metaclass=MixInMeta):
)
self.add_option_group(group)
if not getattr(self, "_skip_console_logging_config_", False):
group.add_option(
"-l",
"--log-level",
dest=self._loglevel_config_setting_name_,
choices=list(log.LOG_LEVELS),
help="Console logging log level. One of {}. Default: '{}'.".format(
", ".join(["'{}'".format(n) for n in log.SORTED_LEVEL_NAMES]),
self._default_logging_level_,
),
)
group.add_option(
*self._console_log_level_cli_flags,
dest=self._loglevel_config_setting_name_,
choices=list(salt._logging.LOG_LEVELS),
help="Console logging log level. One of {}. Default: '{}'.".format(
", ".join(["'{}'".format(n) for n in salt._logging.SORTED_LEVEL_NAMES]),
self._default_logging_level_,
),
)
def _logfile_callback(option, opt, value, parser, *args, **kwargs):
if not os.path.dirname(value):
@ -628,12 +635,23 @@ class LogLevelMixIn(metaclass=MixInMeta):
group.add_option(
"--log-file-level",
dest=self._logfile_loglevel_config_setting_name_,
choices=list(log.LOG_LEVELS),
choices=list(salt._logging.SORTED_LEVEL_NAMES),
help="Logfile logging log level. One of {}. Default: '{}'.".format(
", ".join(["'{}'".format(n) for n in log.SORTED_LEVEL_NAMES]),
", ".join(["'{}'".format(n) for n in salt._logging.SORTED_LEVEL_NAMES]),
self._default_logging_level_,
),
)
self._mixin_after_parsed_funcs.append(self.__setup_logging_routines)
def __setup_logging_routines(self):
# Now that everything is parsed, let's start configuring logging
self._mixin_after_parsed_funcs.append(self.__setup_console_logger_config)
self._mixin_after_parsed_funcs.append(self.__setup_logfile_logger_config)
self._mixin_after_parsed_funcs.append(self.__setup_logging_config)
self._mixin_after_parsed_funcs.append(self.__verify_logging)
self._mixin_after_parsed_funcs.append(self.__setup_logging)
# Add some termination routines too
self._mixin_before_exit_funcs.append(self.__shutdown_logging)
def process_log_level(self):
if not getattr(self.options, self._loglevel_config_setting_name_, None):
@ -654,19 +672,12 @@ class LogLevelMixIn(metaclass=MixInMeta):
self._default_logging_level_,
)
# Setup extended logging right before the last step
self._mixin_after_parsed_funcs.append(self.__setup_extended_logging)
# Setup the console and log file configuration before the MP logging
# listener because the MP logging listener may need that config.
self._mixin_after_parsed_funcs.append(self.__setup_logfile_logger_config)
self._mixin_after_parsed_funcs.append(self.__setup_console_logger_config)
# Setup the multiprocessing log queue listener if enabled
self._mixin_after_parsed_funcs.append(self._setup_mp_logging_listener)
# Setup the multiprocessing log queue client if listener is enabled
# and using Windows
self._mixin_after_parsed_funcs.append(self._setup_mp_logging_client)
# Setup the console as the last _mixin_after_parsed_func to run
self._mixin_after_parsed_funcs.append(self.__setup_console_logger)
def __shutdown_logging(self):
# In case we never got logging properly set up
temp_log_handler = salt._logging.get_temp_handler()
if temp_log_handler is not None:
temp_log_handler.flush()
salt._logging.shutdown_temp_handler()
def process_log_file(self):
if not getattr(self.options, self._logfile_config_setting_name_, None):
@ -716,6 +727,25 @@ class LogLevelMixIn(metaclass=MixInMeta):
# Remove it from config so it inherits from log_level_logfile
self.config.pop(self._logfile_loglevel_config_setting_name_)
def __setup_console_logger_config(self):
# Since we're not going to be a daemon, setup the console logger
logfmt = self.config.get(
"log_fmt_console",
self.config.get("log_fmt", salt._logging.DFLT_LOG_FMT_CONSOLE),
)
if self.config.get("log_datefmt_console", None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop("log_datefmt_console", None)
datefmt = self.config.get(
"log_datefmt_console", self.config.get("log_datefmt", "%Y-%m-%d %H:%M:%S")
)
# Save the settings back to the configuration
self.config["log_fmt_console"] = logfmt
self.config["log_datefmt_console"] = datefmt
def __setup_logfile_logger_config(self):
if (
self._logfile_loglevel_config_setting_name_ in self.config
@ -752,15 +782,16 @@ class LogLevelMixIn(metaclass=MixInMeta):
self.config.pop(self._logfile_config_setting_name_)
if self.config["verify_env"] and self.config["log_level"] not in ("quiet",):
# Verify the logfile if it was explicitly set but do not try to
# verify the default
if logfile is not None:
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
verify_log_files([logfile], self.config["user"])
if self.config[self._logfile_loglevel_config_setting_name_] != "quiet":
# Verify the logfile if it was explicitly set but do not try to
# verify the default
if logfile is not None:
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
verify_log_files([logfile], self.config["user"])
if logfile is None:
# Use the default setting if the logfile wasn't explicity set
# Use the default setting if the logfile wasn't explicitly set
logfile = self._default_logging_logfile_
cli_log_file_fmt = "cli_{}_log_file_fmt".format(
@ -778,7 +809,7 @@ class LogLevelMixIn(metaclass=MixInMeta):
"log_fmt_logfile",
self.config.get(
"log_fmt_console",
self.config.get("log_fmt", config._DFLT_LOG_FMT_CONSOLE),
self.config.get("log_fmt", salt._logging.DFLT_LOG_FMT_CONSOLE),
),
)
@ -822,7 +853,7 @@ class LogLevelMixIn(metaclass=MixInMeta):
if not os.path.isdir(user_salt_dir):
os.makedirs(user_salt_dir, 0o750)
logfile_basename = os.path.basename(self._default_logging_logfile_)
logger.debug(
log.debug(
"The user '%s' is not allowed to write to '%s'. "
"The log file will be stored in '~/.salt/'%s'.log'",
str(current_user),
@ -843,10 +874,10 @@ class LogLevelMixIn(metaclass=MixInMeta):
# Not supported on platforms other than Windows.
# Other platforms may use an external tool such as 'logrotate'
if log_rotate_max_bytes != 0:
logger.warning("'log_rotate_max_bytes' is only supported on Windows")
log.warning("'log_rotate_max_bytes' is only supported on Windows")
log_rotate_max_bytes = 0
if log_rotate_backup_count != 0:
logger.warning("'log_rotate_backup_count' is only supported on Windows")
log.warning("'log_rotate_backup_count' is only supported on Windows")
log_rotate_backup_count = 0
# Save the settings back to the configuration
@ -857,111 +888,25 @@ class LogLevelMixIn(metaclass=MixInMeta):
self.config["log_rotate_max_bytes"] = log_rotate_max_bytes
self.config["log_rotate_backup_count"] = log_rotate_backup_count
def setup_logfile_logger(self):
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows when using a logging listener, all log file logging
# will go through the logging listener.
return
logfile = self.config[self._logfile_config_setting_name_]
loglevel = self.config[self._logfile_loglevel_config_setting_name_]
log_file_fmt = self.config["log_fmt_logfile"]
log_file_datefmt = self.config["log_datefmt_logfile"]
log_rotate_max_bytes = self.config["log_rotate_max_bytes"]
log_rotate_backup_count = self.config["log_rotate_backup_count"]
log.setup_logfile_logger(
logfile,
loglevel,
log_format=log_file_fmt,
date_format=log_file_datefmt,
max_bytes=log_rotate_max_bytes,
backup_count=log_rotate_backup_count,
def __setup_logging_config(self):
logging_opts = copy.deepcopy(self.config)
logging_opts["configure_console_logger"] = (
getattr(self.options, "daemon", False) is False
)
for name, level in self.config.get("log_granular_levels", {}).items():
log.set_logger_level(name, level)
def __setup_extended_logging(self):
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows when using a logging listener, all extended logging
# will go through the logging listener.
return
log.setup_extended_logging(self.config)
def _get_mp_logging_listener_queue(self):
return log.get_multiprocessing_logging_queue()
def _setup_mp_logging_listener(self):
if self._setup_mp_logging_listener_:
log.setup_multiprocessing_logging_listener(
self.config, self._get_mp_logging_listener_queue()
)
def _setup_mp_logging_client(self):
if self._setup_mp_logging_listener_:
# Set multiprocessing logging level even in non-Windows
# environments. In non-Windows environments, this setting will
# propogate from process to process via fork behavior and will be
# used by child processes if they invoke the multiprocessing
# logging client.
log.set_multiprocessing_logging_level_by_opts(self.config)
if salt.utils.platform.is_windows():
# On Windows, all logging including console and
# log file logging will go through the multiprocessing
# logging listener if it exists.
# This will allow log file rotation on Windows
# since only one process can own the log file
# for log file rotation to work.
log.setup_multiprocessing_logging(self._get_mp_logging_listener_queue())
# Remove the temp logger and any other configured loggers since
# all of our logging is going through the multiprocessing
# logging listener.
log.shutdown_temp_logging()
log.shutdown_console_logging()
log.shutdown_logfile_logging()
def __setup_console_logger_config(self):
# Since we're not going to be a daemon, setup the console logger
logfmt = self.config.get(
"log_fmt_console", self.config.get("log_fmt", config._DFLT_LOG_FMT_CONSOLE)
)
if self.config.get("log_datefmt_console", None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop("log_datefmt_console", None)
datefmt = self.config.get(
"log_datefmt_console", self.config.get("log_datefmt", "%Y-%m-%d %H:%M:%S")
)
# Save the settings back to the configuration
self.config["log_fmt_console"] = logfmt
self.config["log_datefmt_console"] = datefmt
def __setup_console_logger(self):
# If daemon is set force console logger to quiet
if getattr(self.options, "daemon", False) is True:
return
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows when using a logging listener, all console logging
# will go through the logging listener.
return
logging_opts["log_file_key"] = self._logfile_config_setting_name_
# ensure that yaml stays valid with log output
if getattr(self.options, "output", None) == "yaml":
log_format = "# {}".format(self.config["log_fmt_console"])
else:
log_format = self.config["log_fmt_console"]
logging_opts["log_fmt_console"] = "# {}".format(
logging_opts["log_fmt_console"]
)
salt._logging.set_logging_options_dict(logging_opts)
salt._logging.freeze_logging_options_dict()
log.setup_console_logger(
self.config["log_level"],
log_format=log_format,
date_format=self.config["log_datefmt_console"],
)
for name, level in self.config.get("log_granular_levels", {}).items():
log.set_logger_level(name, level)
def __setup_logging(self):
salt._logging.setup_logging()
def __verify_logging(self):
verify_log(self.config)
class RunUserMixin(metaclass=MixInMeta):
@ -1004,19 +949,22 @@ class DaemonMixIn(metaclass=MixInMeta):
# Log error only when running salt-master as a root user.
# Otherwise this can be ignored, since salt-master is able to
# overwrite the PIDfile on the next start.
err_msg = (
"PIDfile could not be deleted: %s",
str(self.config["pidfile"]),
)
log_error = False
if salt.utils.platform.is_windows():
user = salt.utils.win_functions.get_current_user()
if salt.utils.win_functions.is_admin(user):
logger.info(*err_msg)
logger.debug(str(err))
log_error = True
else:
if not os.getuid():
logger.info(*err_msg)
logger.debug(str(err))
log_error = True
if log_error:
log.info(
"PIDfile(%s) could not be deleted: %s",
self.config["pidfile"],
err,
exc_info_on_loglevel=logging.DEBUG,
)
def set_pidfile(self):
from salt.utils.process import set_pidfile
@ -1041,17 +989,15 @@ class DaemonMixIn(metaclass=MixInMeta):
def daemonize_if_required(self):
if self.options.daemon:
if self._setup_mp_logging_listener_ is True:
# Stop the logging queue listener for the current process
# We'll restart it once forked
log.shutdown_multiprocessing_logging_listener(daemonizing=True)
salt._logging.shutdown_logging()
salt.utils.process.daemonize()
# Because we have daemonized, salt._logging.in_mainprocess() will
# return False. We'll just force it to return True for this
# particular case so that proper logging can be set up.
salt._logging.in_mainprocess.__pid__ = os.getpid()
salt._logging.setup_logging()
salt.utils.process.appendproctitle("MainProcess")
# Setup the multiprocessing log queue listener if enabled
self._setup_mp_logging_listener()
def check_running(self):
"""
Check if a pid file exists and if it is associated with
@ -1982,7 +1928,6 @@ class MasterOptionParser(
_config_filename_ = "master"
# LogLevelMixIn attributes
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS["log_file"]
_setup_mp_logging_listener_ = True
def setup_config(self):
opts = config.master_config(self.get_config_file_path())
@ -2000,7 +1945,6 @@ class MinionOptionParser(
_config_filename_ = "minion"
# LogLevelMixIn attributes
_default_logging_logfile_ = config.DEFAULT_MINION_OPTS["log_file"]
_setup_mp_logging_listener_ = True
def setup_config(self):
opts = config.minion_config(
@ -2008,14 +1952,6 @@ class MinionOptionParser(
cache_minion_id=True,
ignore_config_errors=False,
)
# Optimization: disable multiprocessing logging if running as a
# daemon, without engines and without multiprocessing
if (
not opts.get("engines")
and not opts.get("multiprocessing", True)
and self.options.daemon
): # pylint: disable=no-member
self._setup_mp_logging_listener_ = False
salt.features.setup_features(opts)
return opts
@ -2080,7 +2016,6 @@ class SyndicOptionParser(
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS[
_logfile_config_setting_name_
]
_setup_mp_logging_listener_ = True
def setup_config(self):
opts = config.syndic_config(
@ -2523,7 +2458,7 @@ class SaltKeyOptionParser(
_config_filename_ = "master"
# LogLevelMixIn attributes
_skip_console_logging_config_ = True
_console_log_level_cli_flags = ("--log-level",)
_logfile_config_setting_name_ = "key_logfile"
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS[
_logfile_config_setting_name_
@ -2792,7 +2727,6 @@ class SaltKeyOptionParser(
if self.options.gen_keys:
# Since we're generating the keys, some defaults can be assumed
# or tweaked
keys_config[self._logfile_config_setting_name_] = os.devnull
keys_config["pki_dir"] = self.options.gen_keys_dir
salt.features.setup_features(keys_config)
return keys_config
@ -2837,11 +2771,6 @@ class SaltKeyOptionParser(
self.__create_keys_dir
) # pylint: disable=no-member
def _mixin_after_parsed(self):
# It was decided to always set this to info, since it really all is
# info or error.
self.config["loglevel"] = "info"
def __create_keys_dir(self):
if not os.path.isdir(self.config["gen_keys_dir"]):
os.makedirs(self.config["gen_keys_dir"])

View file

@ -20,8 +20,8 @@ import sys
import threading
import time
import salt._logging
import salt.defaults.exitcodes
import salt.log.setup
import salt.utils.files
import salt.utils.path
import salt.utils.platform
@ -148,6 +148,9 @@ def daemonize_if(opts):
def systemd_notify_call(action):
"""
Notify systemd that this process has started
"""
process = subprocess.Popen(
["systemd-notify", action], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
@ -895,16 +898,11 @@ class Process(multiprocessing.Process):
return instance
def __init__(self, *args, **kwargs):
log_queue = kwargs.pop("log_queue", None)
log_queue_level = kwargs.pop("log_queue_level", None)
logging_config = kwargs.pop("__logging_config__", None)
super().__init__(*args, **kwargs)
self.log_queue = log_queue
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
self.log_queue_level = log_queue_level
if self.log_queue_level is None:
self.log_queue_level = salt.log.setup.get_multiprocessing_logging_level()
if logging_config is None:
logging_config = salt._logging.get_logging_options_dict()
self.__logging_config__ = logging_config
# Because we need to enforce our after fork and finalize routines,
# we must wrap this class run method to allow for these extra steps
@ -942,10 +940,8 @@ class Process(multiprocessing.Process):
"""
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if "log_queue" not in kwargs:
kwargs["log_queue"] = self.log_queue
if "log_queue_level" not in kwargs:
kwargs["log_queue_level"] = self.log_queue_level
if "__logging_config__" not in kwargs:
kwargs["__logging_config__"] = self.__logging_config__
return {
"args": args,
"kwargs": kwargs,
@ -958,26 +954,15 @@ class Process(multiprocessing.Process):
def wrapped_run_func():
# Static after fork method, always needs to happen first
appendproctitle(self.name)
if not salt._logging.get_logging_options_dict():
salt._logging.set_logging_options_dict(self.__logging_config__)
try:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
except Exception: # pylint: disable=broad-except
salt._logging.setup_logging()
except Exception as exc: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.set_multiprocessing_logging_queue() on %s",
self,
)
try:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.set_multiprocessing_logging_level() on %s",
self,
)
try:
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.setup_multiprocessing_logging() on %s",
"Failed to configure logging on %s: %s",
self,
exc,
)
for method, args, kwargs in self._after_fork_methods:
@ -1025,12 +1010,9 @@ class Process(multiprocessing.Process):
finally:
# Static finalize method, should always run last
try:
salt.log.setup.shutdown_multiprocessing_logging()
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.shutdown_multiprocessing_logging() on %s",
self,
)
salt._logging.shutdown_logging()
except Exception as exc: # pylint: disable=broad-except
log.exception("Failed to shutdown logging on %s: %s", self, exc)
return wrapped_run_func
@ -1106,6 +1088,9 @@ class SignalHandlingProcess(Process):
@contextlib.contextmanager
def default_signals(*signals):
"""
Temporarily restore signals to their default values.
"""
old_signals = {}
for signum in signals:
try:

View file

@ -920,12 +920,10 @@ def gen_min(
"salt/pillar",
"salt/pillar/__init__.py",
"salt/utils/textformat.py",
"salt/log",
"salt/log/__init__.py",
"salt/log/handlers",
"salt/log/handlers/__init__.py",
"salt/log/mixins.py",
"salt/log/setup.py",
"salt/_logging/__init__.py",
"salt/_logging/handlers.py",
"salt/_logging/impl.py",
"salt/_logging/mixins.py",
"salt/cli",
"salt/cli/__init__.py",
"salt/cli/caller.py",

View file

@ -147,6 +147,9 @@ def verify_socket(interface, pub_port, ret_port):
def verify_logs_filter(files):
"""
Filter files to verify.
"""
to_verify = []
for filename in files:
verify_file = True

View file

@ -26,7 +26,7 @@ import _pytest.logging
import _pytest.skipping
import psutil
import pytest
import salt._logging.impl
import salt._logging
import salt._logging.mixins
import salt.config
import salt.loader
@ -1432,4 +1432,21 @@ def ssl_webserver(integration_files_dir, scope="module"):
webserver.stop()
@pytest.fixture(scope="session", autouse=True)
def _disable_salt_logging():
    """
    Hand salt a logging configuration it expects, while configuring no
    actual handlers: pytest's own logging capture records everything the
    test suite needs.
    """
    # All of these are undocumented, on purpose, at least for now, options.
    opts = dict.fromkeys(
        (
            "configure_ext_handlers",
            "configure_file_logger",
            "configure_console_logger",
            "configure_granular_levels",
        ),
        False,
    )
    salt._logging.set_logging_options_dict(opts)
    # Run the test suite
    yield
# <---- Custom Fixtures ----------------------------------------------------------------------------------------------

View file

@ -8,6 +8,11 @@ setup.py:
- pytests.scenarios.setup.test_man
- pytests.scenarios.setup.test_install
salt/_logging/(impl|handlers).py:
- unit.logging.test_deferred_stream_handler
- pytests.integration._logging.test_jid_logging
- pytests.integration._logging.test_multiple_processes_logging
salt/modules/(apkpkg|aptpkg|ebuildpkg|dpkg_lowpkg|freebsdpkg|mac_brew_pkg|mac_ports_pkg|openbsdpkg|opkg|pacmanpkg|pkgin|pkgng|pkg_resource|rpm_lowpkg|solarisipspkg|solarispkg|win_pkg|xbpspkg|yumpkg|zypperpkg)\.py:
- pytests.unit.states.test_pkg
- pytests.functional.modules.test_pkg

View file

@ -2,7 +2,7 @@ import os
import sys
import pytest
from salt._logging.impl import get_log_record_factory, set_log_record_factory
from salt._logging import get_log_record_factory, set_log_record_factory
from salt.cli.api import SaltAPI

View file

@ -0,0 +1,68 @@
import pathlib
import time
from types import SimpleNamespace
import pytest
import salt.utils.platform
from _pytest.pytester import LineMatcher
from saltfactories.utils import random_string
@pytest.fixture(scope="module")
def logging_master(salt_factories):
    """
    Start a salt master with debug-level console and logfile logging, let it
    run briefly so its child processes emit log records, then terminate it.

    Returns a ``SimpleNamespace`` with the master's ``process_pid``, the
    ``terminate()`` result (which carries the captured stderr) and the path
    to its log file, for the tests in this module to assert against.
    """
    # Include processName/process in the format so tests can tell which
    # process emitted each record.
    log_format = "|%(name)-17s:%(lineno)-4d|%(levelname)-8s|%(processName)s|PID:%(process)d|%(message)s"
    config_overrides = {
        "log_level": "debug",
        "log_fmt_console": log_format,
        "log_level_logfile": "debug",
        "log_fmt_logfile": log_format,
    }
    factory = salt_factories.salt_master_daemon(
        random_string("master-logging-"),
        overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    process_pid = None
    with factory.started("--log-level=debug"):
        # Capture the pid while the master is running; it's gone after stop.
        process_pid = factory.pid
        # Wait a little after the master starts
        if not salt.utils.platform.spawning_platform():
            time.sleep(2)
        else:
            # Spawning platforms take longer to get all processes going
            time.sleep(10)
    ret = factory.terminate()
    return SimpleNamespace(
        process_pid=process_pid,
        ret=ret,
        log_file=pathlib.Path(factory.config["log_file"]),
    )
@pytest.fixture(scope="module")
def matches(logging_master):
    """
    Return the fnmatch patterns expected to appear in the master's output:
    the main process pid plus the names of the processes the master starts.
    """
    # The main master process, matched by its pid.
    expected = ["*|PID:{}|*".format(logging_master.process_pid)]
    # Each of these is a separate process started by the master.
    expected.extend(
        "*|{}|*".format(proc_name)
        for proc_name in (
            "MWorker-*",
            "Maintenance",
            "ReqServer",
            "PubServerChannel._publish_daemon",
            "MWorkerQueue",
            "FileServerUpdate",
        )
    )
    return expected
@pytest.mark.windows_whitelisted
def test_multiple_processes_logging_stderr(logging_master, matches):
    """
    The main pid, and the master's other processes, must all show up in the
    STDERR (console) log output.
    """
    stderr_lines = logging_master.ret.stderr.splitlines()
    LineMatcher(stderr_lines).fnmatch_lines_random(matches)
@pytest.mark.windows_whitelisted
def test_multiple_processes_logging_log_file(logging_master, matches):
    """
    The same process markers must also be present in the master's log file.
    """
    logfile_lines = logging_master.log_file.read_text().splitlines()
    LineMatcher(logfile_lines).fnmatch_lines_random(matches)

View file

@ -14,6 +14,7 @@ import errno
import fnmatch
import functools
import inspect
import io
import json
import logging
import os
@ -1874,3 +1875,48 @@ def get_virtualenv_binary_path():
# We're not running inside a virtualenv
virtualenv_binary = None
return virtualenv_binary
class CaptureOutput:
    """
    Context manager that redirects ``sys.stdout`` and/or ``sys.stderr``
    into in-memory buffers while the ``with`` block runs, restoring the
    original streams on exit.

    The captured text is available through the :attr:`stdout` and
    :attr:`stderr` properties, which return ``None`` for a stream that was
    not captured.
    """

    def __init__(self, capture_stdout=True, capture_stderr=True):
        # A ``None`` buffer means "leave that stream untouched".
        self._stdout = io.StringIO() if capture_stdout else None
        self._stderr = io.StringIO() if capture_stderr else None
        self._original_stdout = None
        self._original_stderr = None

    def __enter__(self):
        if self._stdout:
            self._original_stdout = sys.stdout
            sys.stdout = self._stdout
        if self._stderr:
            self._original_stderr = sys.stderr
            sys.stderr = self._stderr
        return self

    def __exit__(self, *args):
        # Restore whichever streams were swapped out on __enter__.
        if self._stdout:
            sys.stdout = self._original_stdout
            self._original_stdout = None
        if self._stderr:
            sys.stderr = self._original_stderr
            self._original_stderr = None

    @property
    def stdout(self):
        """Everything written to stdout so far, or ``None`` if not captured."""
        if self._stdout is None:
            return None
        # getvalue() returns the whole buffer without mutating the stream
        # position, unlike the seek(0)/read() dance.
        return self._stdout.getvalue()

    @property
    def stderr(self):
        """Everything written to stderr so far, or ``None`` if not captured."""
        if self._stderr is None:
            return None
        return self._stderr.getvalue()

View file

View file

@ -0,0 +1,298 @@
"""
tests.unit.logging.test_deferred_stream_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import time
import salt.utils.files
import salt.utils.platform
from pytestshellutils.utils.processes import terminate_process
from salt.utils.nb_popen import NonBlockingPopen
from tests.support.helpers import dedent
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
log = logging.getLogger(__name__)
class TestDeferredStreamHandler(TestCase):
    """
    Tests for ``salt._logging.handlers.DeferredStreamHandler``.

    The handler buffers log records until it is either explicitly flushed,
    synced with other handlers, or the interpreter shuts down — these tests
    exercise each of those paths.  Root-logger mutation happens either in a
    ``multiprocessing`` child or in a separate python script so the test
    runner's own logging configuration is never touched.
    """

    def test_sync_with_handlers(self):
        """Buffered records must only be emitted once synced to a real handler."""

        def proc_target():
            # Imports are local because this function runs in a child process.
            import sys
            import logging

            from salt._logging.handlers import DeferredStreamHandler
            from tests.support.helpers import CaptureOutput

            with CaptureOutput() as stds:
                handler = DeferredStreamHandler(sys.stderr)
                handler.setLevel(logging.DEBUG)
                formatter = logging.Formatter("%(message)s")
                handler.setFormatter(formatter)
                logging.root.addHandler(handler)
                logger = logging.getLogger(__name__)
                logger.info("Foo")
                logger.info("Bar")
                logging.root.removeHandler(handler)
                # Nothing may have been written yet — records are deferred.
                assert not stds.stdout
                assert not stds.stderr
                stream_handler = logging.StreamHandler(sys.stderr)
                # Sync with the other handlers
                handler.sync_with_handlers([stream_handler])
                assert not stds.stdout
                # Both buffered records show up on stderr, in order.
                assert stds.stderr == "Foo\nBar\n"

        proc = multiprocessing.Process(target=proc_target)
        proc.start()
        proc.join()
        # A non-zero exit code means one of the in-child asserts failed.
        assert proc.exitcode == 0

    def test_deferred_write_on_flush(self):
        """Calling ``flush()`` must emit all buffered records to the stream."""

        def proc_target():
            # Imports are local because this function runs in a child process.
            import sys
            import logging

            from salt._logging.handlers import DeferredStreamHandler
            from tests.support.helpers import CaptureOutput

            with CaptureOutput() as stds:
                handler = DeferredStreamHandler(sys.stderr)
                handler.setLevel(logging.DEBUG)
                formatter = logging.Formatter("%(message)s")
                handler.setFormatter(formatter)
                logging.root.addHandler(handler)
                logger = logging.getLogger(__name__)
                logger.info("Foo")
                logger.info("Bar")
                logging.root.removeHandler(handler)
                # Nothing may have been written yet — records are deferred.
                assert not stds.stdout
                assert not stds.stderr
                # Flush the handler
                handler.flush()
                assert not stds.stdout
                assert stds.stderr == "Foo\nBar\n"

        proc = multiprocessing.Process(target=proc_target)
        proc.start()
        proc.join()
        # A non-zero exit code means one of the in-child asserts failed.
        assert proc.exitcode == 0

    def test_deferred_write_on_atexit(self):
        """Deferred records must still reach the stream at interpreter exit."""
        # Python will .flush() and .close() all logging handlers at interpreter shutdown.
        # This should be enough to flush our deferred messages.
        pyscript = dedent(
            r"""
            import sys
            import time
            import logging

            CODE_DIR = {!r}
            if CODE_DIR in sys.path:
                sys.path.remove(CODE_DIR)
            sys.path.insert(0, CODE_DIR)

            from salt._logging.handlers import DeferredStreamHandler
            # Reset any logging handlers we might have already
            logging.root.handlers[:] = []

            handler = DeferredStreamHandler(sys.stderr)
            handler.setLevel(logging.DEBUG)
            logging.root.addHandler(handler)

            log = logging.getLogger(__name__)
            sys.stdout.write('STARTED\n')
            sys.stdout.flush()
            log.debug('Foo')
            sys.exit(0)
            """.format(
                RUNTIME_VARS.CODE_DIR
            )
        )
        script_path = os.path.join(RUNTIME_VARS.TMP, "atexit_deferred_logging_test.py")
        with salt.utils.files.fopen(script_path, "w") as wfh:
            wfh.write(pyscript)
        self.addCleanup(os.unlink, script_path)

        proc = NonBlockingPopen(
            [sys.executable, script_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out = b""
        err = b""

        # This test should never take more than 5 seconds
        execution_time = 5
        max_time = time.time() + execution_time
        try:
            # Just loop consuming output
            while True:
                if time.time() > max_time:
                    self.fail(
                        "Script didn't exit after {} second".format(execution_time)
                    )

                time.sleep(0.125)
                # NonBlockingPopen.recv()/recv_err() return b"" when there is
                # no new output and None once the stream is closed.
                _out = proc.recv()
                _err = proc.recv_err()
                if _out:
                    out += _out
                if _err:
                    err += _err

                if _out is None and _err is None:
                    # The script exited
                    break

                if proc.poll() is not None:
                    # The script exited
                    break
        finally:
            terminate_process(proc.pid, kill_children=True)
        # 'Foo' was logged while deferred; shutdown flushing must emit it.
        if b"Foo" not in err:
            self.fail("'Foo' should be in stderr and it's not: {}".format(err))

    @skipIf(salt.utils.platform.is_windows(), "Windows does not support SIGINT")
    def test_deferred_write_on_sigint(self):
        """Deferred records must be flushed when the script dies to SIGINT."""
        pyscript = dedent(
            r"""
            import sys
            import time
            import signal
            import logging

            CODE_DIR = {!r}
            if CODE_DIR in sys.path:
                sys.path.remove(CODE_DIR)
            sys.path.insert(0, CODE_DIR)

            from salt._logging.handlers import DeferredStreamHandler
            # Reset any logging handlers we might have already
            logging.root.handlers[:] = []

            handler = DeferredStreamHandler(sys.stderr)
            handler.setLevel(logging.DEBUG)
            logging.root.addHandler(handler)
            if signal.getsignal(signal.SIGINT) != signal.default_int_handler:
                # Looking at you Debian based distros :/
                signal.signal(signal.SIGINT, signal.default_int_handler)

            log = logging.getLogger(__name__)

            start_printed = False
            while True:
                try:
                    log.debug('Foo')
                    if start_printed is False:
                        sys.stdout.write('STARTED\n')
                        sys.stdout.write('SIGINT HANDLER: {{!r}}\n'.format(signal.getsignal(signal.SIGINT)))
                        sys.stdout.flush()
                        start_printed = True
                    time.sleep(0.125)
                except (KeyboardInterrupt, SystemExit):
                    log.info('KeyboardInterrupt caught')
                    sys.stdout.write('KeyboardInterrupt caught\n')
                    sys.stdout.flush()
                    break
            log.info('EXITING')
            sys.stdout.write('EXITING\n')
            sys.stdout.flush()
            sys.exit(0)
            """.format(
                RUNTIME_VARS.CODE_DIR
            )
        )
        script_path = os.path.join(RUNTIME_VARS.TMP, "sigint_deferred_logging_test.py")
        with salt.utils.files.fopen(script_path, "w") as wfh:
            wfh.write(pyscript)
        self.addCleanup(os.unlink, script_path)

        proc = NonBlockingPopen(
            [sys.executable, script_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out = b""
        err = b""

        # Test should take less than 20 seconds, way less
        execution_time = 10
        start = time.time()
        max_time = time.time() + execution_time
        try:
            signalled = False
            log.info("Starting Loop")
            while True:
                time.sleep(0.125)
                # NonBlockingPopen.recv()/recv_err() return b"" when there is
                # no new output and None once the stream is closed.
                _out = proc.recv()
                _err = proc.recv_err()

                if _out:
                    out += _out
                if _err:
                    err += _err

                if b"STARTED" in out and not signalled:
                    # Enough time has passed
                    proc.send_signal(signal.SIGINT)
                    signalled = True
                    log.debug("Sent SIGINT after: %s", time.time() - start)

                if signalled is False:
                    # Until SIGINT is sent, the deferred handler must keep
                    # everything buffered — any output is a failure.
                    if out:
                        self.fail(
                            "We have stdout output when there should be none: {}".format(
                                out
                            )
                        )
                    if err:
                        self.fail(
                            "We have stderr output when there should be none: {}".format(
                                err
                            )
                        )

                if _out is None and _err is None:
                    log.info("_out and _err are None")
                    if b"Foo" not in err:
                        self.fail(
                            "No more output and 'Foo' should be in stderr and it's not: {}".format(
                                err
                            )
                        )
                    break

                if proc.poll() is not None:
                    log.debug("poll() is not None")
                    if b"Foo" not in err:
                        self.fail(
                            "Process terminated and 'Foo' should be in stderr and it's not: {}".format(
                                err
                            )
                        )
                    break

                if time.time() > max_time:
                    log.debug("Reached max time")
                    if b"Foo" not in err:
                        self.fail(
                            "'Foo' should be in stderr and it's not:\n{0}\nSTDERR:\n{0}\n{1}\n{0}\nSTDOUT:\n{0}\n{2}\n{0}".format(
                                "-" * 80, err, out
                            )
                        )
        finally:
            terminate_process(proc.pid, kill_children=True)
        log.debug("Test took %s seconds", time.time() - start)

View file

@ -183,6 +183,7 @@ class BadTestModuleNamesTestCase(TestCase):
"integration.states.test_renderers",
"integration.wheel.test_client",
"unit.cache.test_cache",
"unit.logging.test_deferred_stream_handler",
"unit.serializers.test_serializers",
"unit.setup.test_install",
"unit.setup.test_man",

File diff suppressed because it is too large Load diff