mirror of https://github.com/saltstack/salt.git
Fix parallel states with long ID dec or name
The cache files used for parallel states are named after the state's tag, so a long ID declaration ("ID dec") or name produces a filename that exceeds the maximum length allowed by the kernel, causing errors. This commit fixes that by base64-encoding the tag and using (at most) the first 32 characters of the result as the parallel cache filename.
parent: d2a193b791
commit: b3e9678fd9
3 changed files with 61 additions and 36 deletions
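Both hunks below, in the State class, make the same substitution: rather than deriving the parallel cache filename from the sanitized tag itself via _clean_tag(tag), they use at most the first 32 characters of the base64-encoded tag. A minimal sketch of the scheme, not Salt's actual code, assuming salt.utils.hashutils.base64_b64encode is equivalent to standard base64 of the UTF-8 encoded tag (the cachedir and jid values are made up):

    # Minimal sketch of the new parallel-cache filename scheme.
    import base64
    import os


    def parallel_cache_path(cachedir, jid, tag):
        # Keep at most the first 32 characters of the base64-encoded tag, so
        # the cache filename no longer grows with the ID dec or name length.
        encoded = base64.b64encode(tag.encode('utf-8')).decode('ascii')
        return os.path.join(cachedir, jid, encoded[:32])


    # Hypothetical tag for a state with a very long name parameter.
    tag = 'cmd_|-test_cmd_too_long_|-{0}_|-run'.format('helloworld' * 25)
    print(parallel_cache_path('/var/cache/salt/minion', '20181019000000000000', tag))
    # The final path component is capped at 32 characters, however long the tag is.

Truncating to 32 characters keeps the filename comfortably under the 255-byte limit most filesystems enforce, regardless of how long the ID dec or name is.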
@@ -1732,7 +1732,9 @@ class State(object):
         ret['duration'] = duration
 
         troot = os.path.join(self.opts['cachedir'], self.jid)
-        tfile = os.path.join(troot, _clean_tag(tag))
+        tfile = os.path.join(
+            troot,
+            salt.utils.hashutils.base64_b64encode(tag)[:32])
         if not os.path.isdir(troot):
             try:
                 os.makedirs(troot)
@@ -2091,7 +2093,10 @@ class State(object):
             proc = running[tag].get('proc')
             if proc:
                 if not proc.is_alive():
-                    ret_cache = os.path.join(self.opts['cachedir'], self.jid, _clean_tag(tag))
+                    ret_cache = os.path.join(
+                        self.opts['cachedir'],
+                        self.jid,
+                        salt.utils.hashutils.base64_b64encode(tag)[:32])
                     if not os.path.isfile(ret_cache):
                         ret = {'result': False,
                                'comment': 'Parallel process failed to return',
tests/integration/files/file/base/issue-49738/init.sls (new file, 9 additions)
@@ -0,0 +1,9 @@
+test_cmd_too_long:
+  cmd.run:
+    - name: {{ pillar['long_command'] }}
+    - parallel: True
+
+test_cmd_not_found:
+  cmd.run:
+    - name: {{ pillar['short_command'] }}
+    - parallel: True
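The new integration test added at the end of this diff renders this SLS with pillar['short_command'] set to 'helloworld' and pillar['long_command'] set to that string repeated 25 times, so the first state's name parameter is 250 characters long. A back-of-the-envelope check of why the old tag-based cache filename overflowed; the tag below is illustrative, built in the state_|-id_|-name_|-function layout Salt uses for state tags:

    # Before this change the parallel cache filename was derived from the
    # state's tag, so its length tracked the length of the ID dec and name.
    long_command = 'helloworld' * 25  # the value the test binds to pillar['long_command']

    # Illustrative tag in the <state>_|-<id>_|-<name>_|-<function> layout.
    tag = 'cmd_|-test_cmd_too_long_|-{0}_|-run'.format(long_command)

    print(len(tag))  # 282 -- beyond the 255-character filename limit typical on Linux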
@@ -12,48 +12,20 @@ import time
 # Import Salt Testing libs
 from tests.support.case import ModuleCase
 from tests.support.unit import skipIf
-from tests.support.paths import TMP, FILES
+from tests.support.paths import TMP, BASE_FILES
 from tests.support.mixins import SaltReturnAssertsMixin
 
 # Import salt libs
 import salt.utils
+import salt.utils.atomicfile
 from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
 
 # Import 3rd-party libs
 import salt.ext.six as six
 
 
 DEFAULT_ENDING = salt.utils.to_bytes(os.linesep)
 
 
-def trim_line_end(line):
-    '''
-    Remove CRLF or LF from the end of line.
-    '''
-    if line[-2:] == salt.utils.to_bytes('\r\n'):
-        return line[:-2]
-    elif line[-1:] == salt.utils.to_bytes('\n'):
-        return line[:-1]
-    raise Exception("Invalid line ending")
-
-
-def reline(source, dest, force=False, ending=DEFAULT_ENDING):
-    '''
-    Normalize the line endings of a file.
-    '''
-    fp, tmp = tempfile.mkstemp()
-    os.close(fp)
-    with salt.utils.fopen(tmp, 'wb') as tmp_fd:
-        with salt.utils.fopen(source, 'rb') as fd:
-            lines = fd.readlines()
-            for line in lines:
-                line_noend = trim_line_end(line)
-                tmp_fd.write(line_noend + ending)
-    if os.path.exists(dest) and force:
-        os.remove(dest)
-    os.rename(tmp, dest)
-
-
 class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
     '''
     Validate the state module
@@ -63,10 +35,21 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
 
     def setUp(self):
         super(StateModuleTest, self).setUp()
-        destpath = os.path.join(FILES, 'file', 'base', 'testappend', 'firstif')
-        reline(destpath, destpath, force=True)
-        destpath = os.path.join(FILES, 'file', 'base', 'testappend', 'secondif')
-        reline(destpath, destpath, force=True)
+
+        def _reline(path, ending=DEFAULT_ENDING):
+            '''
+            Normalize the line endings of a file.
+            '''
+            with salt.utils.fopen(path, 'rb') as fhr:
+                lines = fhr.read().splitlines()
+            with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
+                for line in lines:
+                    fhw.write(line + ending)
+
+        destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
+        _reline(destpath)
+        destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
+        _reline(destpath)
 
     def test_show_highstate(self):
         '''
@@ -1436,6 +1419,34 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
             state_run['file_|-test_file_|-{0}_|-managed'.format(file_name)]['result'])
         self.assertTrue(os.path.isfile(file_name))
 
+    def test_parallel_state_with_long_tag(self):
+        '''
+        This tests the case where the state being executed has a long ID dec or
+        name and states are being run in parallel. The filenames used for the
+        parallel state cache were previously based on the tag for each chunk,
+        and longer ID decs or name params can cause the cache file to be longer
+        than the operating system's max file name length. To counter this we
+        instead base64-encode the chunk's tag (as this is a bit faster than
+        doing a sha256) and use up to the first 32 characters from that result
+        as the cache filename. This test will ensure that long tags don't cause
+        caching failures.
+
+        See https://github.com/saltstack/salt/issues/49738 for more info.
+        '''
+        short_command = 'helloworld'
+        long_command = short_command * 25
+
+        ret = self.run_function(
+            'state.sls',
+            mods='issue-49738',
+            pillar={'short_command': short_command,
+                    'long_command': long_command}
+        )
+        comments = sorted([x['comment'] for x in six.itervalues(ret)])
+        expected = sorted(['Command "{0}" run'.format(x)
+                           for x in (short_command, long_command)])
+        assert comments == expected
+
     def tearDown(self):
         nonbase_file = os.path.join(TMP, 'nonbase_env')
         if os.path.isfile(nonbase_file):
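The docstring of the new test explains why the tag is base64-encoded rather than hashed: encoding is a bit faster than computing a sha256. For comparison only, and not what the commit implements, a digest-based filename could be derived along these lines and would also stay within filename length limits:

    # Contrast the commit's truncated-base64 name with a hypothetical
    # truncated-sha256 name for the same (illustrative) tag.
    import base64
    import hashlib

    tag = 'cmd_|-test_cmd_too_long_|-{0}_|-run'.format('helloworld' * 25)

    b64_name = base64.b64encode(tag.encode('utf-8')).decode('ascii')[:32]
    sha_name = hashlib.sha256(tag.encode('utf-8')).hexdigest()[:32]

    print(b64_name)  # first 32 characters of the base64-encoded tag
    print(sha_name)  # first 32 hex characters of the tag's SHA-256 digest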