Mirror of https://github.com/saltstack/salt.git

commit d6ad47579e
Merge branch '2019.2.1' into 2019.2.1_with_multimaster_bb_fixes

5 changed files with 46 additions and 9 deletions
@@ -623,7 +623,6 @@ class IPCMessageSubscriber(IPCClient):
         except tornado.gen.TimeoutError:
             raise tornado.gen.Return(None)
 
         log.trace('IPC Subscriber is starting reading')
         exc_to_raise = None
         ret = None
         try:
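Note: the context above shows the Tornado idiom the rest of this diff leans on: a read that times out is turned into a plain None return instead of an exception, so callers can treat "no message yet" as a normal result. A minimal, self-contained sketch of that idiom (illustrative only, not Salt's code):

    import datetime

    import tornado.gen
    import tornado.ioloop


    @tornado.gen.coroutine
    def read_with_timeout(timeout):
        try:
            # Stand-in for a blocking read: a sleep that outlives the timeout.
            result = yield tornado.gen.with_timeout(
                datetime.timedelta(seconds=timeout), tornado.gen.sleep(10))
        except tornado.gen.TimeoutError:
            # A timeout means "no data yet", not an error, for the caller.
            raise tornado.gen.Return(None)
        raise tornado.gen.Return(result)


    # Prints None after roughly 0.1 seconds.
    print(tornado.ioloop.IOLoop.current().run_sync(lambda: read_with_timeout(0.1)))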
@@ -543,7 +543,6 @@ class SaltEvent(object):
                 # IPCMessageSubscriber.read_sync() uses this type of timeout.
                 if not self.cpub and not self.connect_pub(timeout=wait):
                     break
 
                 raw = self.subscriber.read_sync(timeout=wait)
                 if raw is None:
                     break
@@ -625,6 +624,7 @@ class SaltEvent(object):
         request, it MUST subscribe the result to ensure the response is not lost
         should other regions of code call get_event for other purposes.
         '''
         log.trace("Get event. tag: %s", tag)
         assert self._run_io_loop_sync
 
         match_func = self._get_match_func(match_type)
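Note: both SaltEvent hunks sit on the synchronous read path that blocking consumers of the event bus go through. A rough usage sketch of that path (the master config path below is a placeholder, and the factory arguments follow the pattern documented for salt.utils.event):

    import salt.config
    import salt.utils.event

    opts = salt.config.client_config('/etc/salt/master')   # placeholder path
    event_bus = salt.utils.event.get_event(
        'master',
        sock_dir=opts['sock_dir'],
        transport=opts['transport'],
        opts=opts,
        listen=True)

    # get_event() blocks for up to `wait` seconds; a timeout surfaces as None,
    # which is exactly the read_sync()/None handling shown above.
    data = event_bus.get_event(wait=5, tag='salt/job', full=True)
    print(data)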
@@ -6,6 +6,7 @@ Functions for daemonizing and otherwise modifying running processes
 # Import python libs
 from __future__ import absolute_import, with_statement, print_function, unicode_literals
 import copy
+import io
 import os
 import sys
 import time
@@ -102,12 +103,32 @@ def daemonize(redirect_out=True):
         with salt.utils.files.fopen('/dev/null', 'r+') as dev_null:
             # Redirect python stdin/out/err
             # and the os stdin/out/err which can be different
-            os.dup2(dev_null.fileno(), sys.stdin.fileno())
-            os.dup2(dev_null.fileno(), sys.stdout.fileno())
-            os.dup2(dev_null.fileno(), sys.stderr.fileno())
-            os.dup2(dev_null.fileno(), 0)
-            os.dup2(dev_null.fileno(), 1)
-            os.dup2(dev_null.fileno(), 2)
+            dup2(dev_null, sys.stdin)
+            dup2(dev_null, sys.stdout)
+            dup2(dev_null, sys.stderr)
+            dup2(dev_null, 0)
+            dup2(dev_null, 1)
+            dup2(dev_null, 2)
 
 
+def dup2(file1, file2):
+    if isinstance(file1, int):
+        fno1 = file1
+    else:
+        try:
+            fno1 = file1.fileno()
+        except io.UnsupportedOperation:
+            log.warn('Unsupported operation on file: %r', file1)
+            return
+    if isinstance(file2, int):
+        fno2 = file2
+    else:
+        try:
+            fno2 = file2.fileno()
+        except io.UnsupportedOperation:
+            log.warn('Unsupported operation on file: %r', file2)
+            return
+    os.dup2(fno1, fno2)
 
 
 def daemonize_if(opts):
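Note: the new dup2() helper accepts either an integer descriptor or a file-like object for each argument, resolves both to raw descriptors, and skips anything that cannot supply one (logging a warning) instead of raising io.UnsupportedOperation. A hedged sketch of the resulting behaviour, assuming this branch of salt is importable:

    import io
    import os

    import salt.utils.process

    # Descriptor-backed stream: forwarded to os.dup2 on the underlying fds,
    # here pointing fd 2 (stderr) at /dev/null, much as daemonize() now does.
    with open(os.devnull, 'r+') as dev_null:
        salt.utils.process.dup2(dev_null, 2)

    # Stream with no descriptor at all: logs a warning and returns instead of
    # letting io.UnsupportedOperation propagate out of daemonize().
    salt.utils.process.dup2(io.StringIO(), 2)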
@@ -672,7 +672,7 @@ class TestDaemon(object):
 
         self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
         self.sshd_process = subprocess.Popen(
-            [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
+            [sshd, '-f', 'sshd_config', '-o', 'PidFile={0}'.format(self.sshd_pidfile)],
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             close_fds=True,
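Note: the test-harness change above only splits the fused '-oPidFile=<path>' argument into two argv entries, '-o' followed by 'PidFile=<path>'. As a standalone illustration (the sshd and pid file paths below are placeholders, not taken from the diff):

    import subprocess

    sshd = '/usr/sbin/sshd'             # placeholder path
    pidfile = '/tmp/test-sshd.pid'      # placeholder path

    # Option name and value travel as separate argv elements.
    cmd = [sshd, '-f', 'sshd_config', '-o', 'PidFile={0}'.format(pidfile)]
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)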
@@ -2,6 +2,7 @@
 
 # Import python libs
 from __future__ import absolute_import, print_function, unicode_literals
+import io
 import os
 import sys
 import time
@@ -399,3 +400,19 @@ class TestSignalHandlingMultiprocessingProcess(TestCase):
         evt.set()
         proc2.join(30)
         proc.join(30)
+
+
+class TestDup2(TestCase):
+
+    def test_dup2_no_fileno(self):
+        'The dup2 method does not fail on streams without fileno support'
+        f1 = io.StringIO("some initial text data")
+        f2 = io.StringIO("some initial other text data")
+        with self.assertRaises(io.UnsupportedOperation):
+            f1.fileno()
+        with patch('os.dup2') as dup_mock:
+            try:
+                salt.utils.process.dup2(f1, f2)
+            except io.UnsupportedOperation:
+                assert False, 'io.UnsupportedOperation was raised'
+        assert not dup_mock.called
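Note: to exercise just the new test case outside the full suite, something along these lines should work; the module path is inferred from the classes shown above and is not stated in the diff:

    import unittest

    # Assumed module path for the test shown above.
    from tests.unit.utils.test_process import TestDup2

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestDup2)
    unittest.TextTestRunner(verbosity=2).run(suite)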