Mirror of https://github.com/saltstack/salt.git

commit a69cd74f75

Merge branch '2016.11' into 'nitrogen'

Conflicts:
    - salt/cloud/clouds/nova.py
    - salt/spm/__init__.py

20 changed files with 199 additions and 72 deletions
@@ -1,5 +1,6 @@
salt.auth.rest module
=====================
==============
salt.auth.rest
==============

.. automodule:: salt.auth.rest
    :members:

@@ -145,10 +145,14 @@ Here is a simple YAML renderer example:
.. code-block:: python

    import yaml
    from salt.utils.yamlloader import SaltYamlSafeLoader

    def render(yaml_data, saltenv='', sls='', **kws):
        if not isinstance(yaml_data, basestring):
            yaml_data = yaml_data.read()
        data = yaml.load(yaml_data)
        data = yaml.load(
            yaml_data,
            Loader=SaltYamlSafeLoader
        )
        return data if data else {}

Full List of Renderers
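The renderer hunk above switches the documented example from a bare yaml.load() to SaltYamlSafeLoader. A minimal sketch of the same pattern, assuming a Salt checkout is importable; the file-like check and the sample SLS string are illustrative, not part of the original doc:

    # Hedged sketch: parse YAML through SaltYamlSafeLoader instead of PyYAML's
    # default Loader, so arbitrary Python object tags in untrusted SLS data are
    # not instantiated.
    import yaml
    from salt.utils.yamlloader import SaltYamlSafeLoader

    def render(yaml_data, saltenv='', sls='', **kws):
        # File-like objects are read into a string first, mirroring the doc example.
        if hasattr(yaml_data, 'read'):
            yaml_data = yaml_data.read()
        data = yaml.load(yaml_data, Loader=SaltYamlSafeLoader)
        return data if data else {}

    if __name__ == '__main__':
        print(render("base:\n  '*':\n    - common"))
        # -> {'base': {'*': ['common']}}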
@@ -280,6 +280,15 @@ configuration.
This ID can be queried using the `list_vlans` function, as described below. This
setting is optional.

If this setting is set to `None`, salt-cloud will connect to the private ip of
the server.

.. note::

    If this setting is not provided and the server is not built with a public
    vlan, `private_ssh` or `private_wds` will need to be set to make sure that
    salt-cloud attempts to connect to the private ip.

private_vlan
------------
If it is necessary for an instance to be created within a specific backend VLAN,

@@ -295,12 +304,12 @@ If a server is to only be used internally, meaning it does not have a public
VLAN associated with it, this value would be set to True. This setting is
optional. The default is False.

private_ssh
-----------
private_ssh or private_wds
--------------------------
Whether to run the deploy script on the server using the public IP address
or the private IP address. If set to True, Salt Cloud will attempt to SSH into
the new server using the private IP address. The default is False. This
setting is optional.
or the private IP address. If set to True, Salt Cloud will attempt to SSH or
WinRM into the new server using the private IP address. The default is False.
This setting is optional.

global_identifier
-----------------
@@ -26,6 +26,27 @@ cache driver but helps for more complex drivers like ``consul``.
For more details see ``memcache_expire_seconds`` and other ``memcache_*``
options in the master config reference.


Docker Fixes
============

- Docker authentication has been re-organized. Instead of attempting a login
  for each push/pull (which was unnecessary), a new function called
  :py:func:`dockerng.login <salt.modules.dockerng.login>` has been added, which
  authenticates to the registry and adds the credential token to the
  ``~/.docker/config.json``. After upgrading, if you have not already performed
  a ``docker login`` on the minion using the docker CLI, you will need to run
  :py:func:`dockerng.login <salt.modules.dockerng.login>` to login. This only
  needs to be done once.
- A bug in resolving the tag name for images in a custom registry (where a
  colon can appear in the image name, e.g.
  ``myregistry.com:5000/image:tagname``) has been fixed. In previous releases,
  Salt would use the colon to separate the tag name from the image name, and if
  there was no colon, the default tag name of ``latest`` would be assumed.
  However, this caused custom registry images to be misidentified when no
  explicit tag name was passed (e.g. ``myregistry.com:5000/image``). To work
  around this in earlier releases, simply specify the tag name.

Salt-Cloud Fixes
================
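The note above says dockerng.login only needs to be run once per minion. A hedged sketch of driving that from the Python API rather than the CLI; 'myminion' is a placeholder target, and registry credentials are assumed to already be configured for the dockerng module:

    # Hedged sketch: invoke the new dockerng.login function once per minion
    # from the master via Salt's LocalClient. 'myminion' is a made-up target.
    import salt.client

    local = salt.client.LocalClient()
    # Roughly equivalent to the CLI: salt 'myminion' dockerng.login
    result = local.cmd('myminion', 'dockerng.login')
    print(result)  # per-minion return data from the login attempt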
@@ -617,7 +638,7 @@ Changes:
- **PR** `#40495`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11
  @ *2017-04-03T18:36:16Z*

  - **ISSUE** `#37322`_: (*kiemlicz*) master_tops generating improper top file
  - **ISSUE** `#37322`_: (*kiemlicz*) master_tops generating improper top file
    | refs: `#40427`_
  - **PR** `#40427`_: (*terminalmage*) Clarify the master_tops documentation
  * 02a1f64 Merge pull request `#40495`_ from rallytime/merge-2016.11

@@ -700,7 +721,7 @@ Changes:
- **PR** `#40387`_: (*redbaron4*) More complete fix for 39692
  @ *2017-03-30T22:29:05Z*

  - **ISSUE** `#39692`_: (*djsly*) tuned module and state are broken on 7.3 families.
  - **ISSUE** `#39692`_: (*djsly*) tuned module and state are broken on 7.3 families.
    | refs: `#39719`_ `#39768`_ `#40387`_ `#40387`_
  * dfaa670 Merge pull request `#40387`_ from redbaron4/`fix-39692`_
  * 77a40a0 Lint fixes

@@ -1557,7 +1578,7 @@ Changes:
  | refs: `#39624`_
- **ISSUE** `#39336`_: (*GevatterGaul*) salt-minion fails with IPv6
  | refs: `#39766`_
- **ISSUE** `#39333`_: (*jagguli*) Not Available error - Scheduling custom runner functions
- **ISSUE** `#39333`_: (*jagguli*) Not Available error - Scheduling custom runner functions
  | refs: `#39791`_
- **ISSUE** `#39119`_: (*frogunder*) Head of 2016.3 - Salt-Master uses 90 seconds to restart
  | refs: `#39796`_

@@ -1870,7 +1891,7 @@ Changes:
- **PR** `#39768`_: (*rallytime*) Back-port `#39719`_ to 2016.11
  @ *2017-03-02T02:54:40Z*

  - **ISSUE** `#39692`_: (*djsly*) tuned module and state are broken on 7.3 families.
  - **ISSUE** `#39692`_: (*djsly*) tuned module and state are broken on 7.3 families.
    | refs: `#39719`_ `#39768`_ `#40387`_ `#40387`_
  - **PR** `#39719`_: (*Seb-Solon*) Support new version of tuned-adm binary
    | refs: `#39768`_
@@ -60,11 +60,19 @@ option in the provider config.
  compute_name: nova
  compute_region: RegionOne
  service_type: compute
  verify: '/path/to/custom/certs/ca-bundle.crt'
  tenant: admin
  user: admin
  password: passwordgoeshere
  driver: nova

Note: by default the nova driver will attempt to verify its connection
utilizing the system certificates. If you need to verify against another bundle
of CA certificates or want to skip verification altogether you will need to
specify the verify option. You can specify True or False to verify (or not)
against system certificates, a path to a bundle or CA certs to check against, or
None to allow keystoneauth to search for the certificates on its own (defaults
to True).

For local installations that only use private IP address ranges, the
following option may be useful. Using the old syntax:
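The note above describes what the provider-level verify option ultimately controls. An illustrative sketch of the underlying keystoneauth call the driver makes (see the SaltNova hunk later in this diff); the auth values below are placeholders, not real endpoints or credentials:

    # Hedged sketch: keystoneauth1's Session accepts True/False or a path to a
    # CA bundle for TLS verification, which is what 'verify' feeds into.
    from keystoneauth1.identity import v2
    from keystoneauth1.session import Session

    auth = v2.Password(
        auth_url='https://keystone.example.com:5000/v2.0',  # placeholder
        username='admin',
        password='passwordgoeshere',
        tenant_name='admin',
    )

    # verify=True  -> check against system certificates (the default)
    # verify=False -> skip TLS verification entirely
    # verify='/path/to/custom/certs/ca-bundle.crt' -> check against that bundle
    session = Session(auth=auth, verify='/path/to/custom/certs/ca-bundle.crt')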
@@ -301,6 +309,10 @@ def get_conn():
    if 'password' in vm_:
        kwargs['password'] = vm_['password']

    if 'verify' in vm_ and vm_['use_keystoneauth'] is True:
        kwargs['verify'] = vm_['verify']
    elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:
        log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')
    conn = nova.SaltNova(**kwargs)

    return conn

@@ -608,7 +620,7 @@ def request_instance(vm_=None, call=None):
        'security_groups', vm_, __opts__, search_global=False
    )
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        vm_groups = security_groups
        avail_groups = conn.secgroup_list()
        group_list = []
@@ -400,7 +400,7 @@ def create(vm_):
    private_wds = config.get_cloud_config_value(
        'private_windows', vm_, __opts__, default=False
    )
    if private_ssh or private_wds or public_vlan is None or public_vlan is False:
    if private_ssh or private_wds or public_vlan is None:
        ip_type = 'primaryBackendIpAddress'

    def wait_for_ip():
@@ -111,7 +111,7 @@ VALID_OPTS = {
    'master': (string_types, list),

    # The TCP/UDP port of the master to connect to in order to listen to publications
    'master_port': int,
    'master_port': (string_types, int),

    # The behaviour of the minion when connecting to a master. Can specify 'failover',
    # 'disable' or 'func'. If 'func' is specified, the 'master' option should be set to an
@@ -1769,10 +1769,14 @@ def ip_fqdn():

    ret = {}
    ret['ipv4'] = salt.utils.network.ip_addrs(include_loopback=True)
    ret['ipv6'] = salt.utils.network.ip_addrs6(include_loopback=True)

    _fqdn = hostname()['fqdn']
    for socket_type, ipv_num in ((socket.AF_INET, '4'), (socket.AF_INET6, '6')):
    sockets = [(socket.AF_INET, '4')]

    if __opts__.get('ipv6', True):
        ret['ipv6'] = salt.utils.network.ip_addrs6(include_loopback=True)
        sockets.append((socket.AF_INET6, '6'))

    for socket_type, ipv_num in sockets:
        key = 'fqdn_ip' + ipv_num
        if not ret['ipv' + ipv_num]:
            ret[key] = []

@@ -1781,8 +1785,9 @@ def ip_fqdn():
            info = socket.getaddrinfo(_fqdn, None, socket_type)
            ret[key] = list(set(item[4][0] for item in info))
        except socket.error:
            log.warning('Unable to find IPv{0} record for "{1}" causing a 10 second timeout when rendering grains. '
                        'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn))
            if __opts__['__role'] == 'master':
                log.warning('Unable to find IPv{0} record for "{1}" causing a 10 second timeout when rendering grains. '
                            'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn))
            ret[key] = []

    return ret

@@ -1849,7 +1854,7 @@ def ip6_interfaces():
    # Provides:
    # ip_interfaces

    if salt.utils.is_proxy():
    if salt.utils.is_proxy() or not __opts__.get('ipv6', True):
        return {}

    ret = {}

@@ -1893,8 +1898,10 @@ def dns():
        return {}

    resolv = salt.utils.dns.parse_resolv()
    for key in ('nameservers', 'ip4_nameservers', 'ip6_nameservers',
                'sortlist'):
    keys = ['nameservers', 'ip4_nameservers', 'sortlist']
    if __opts__.get('ipv6', True):
        keys.append('ip6_nameservers')
    for key in keys:
        if key in resolv:
            resolv[key] = [str(i) for i in resolv[key]]
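The grains hunks above gate all IPv6 work behind the minion's ipv6 option so that hosts without working IPv6 DNS do not pay a long getaddrinfo timeout. A rough standalone sketch of the same gating pattern; the opts dict and hostname are stand-ins for illustration, not Salt's real config plumbing:

    # Rough sketch: only resolve the address families enabled in the options
    # dict, so a host with broken IPv6 DNS never waits on an AAAA lookup.
    import socket

    def fqdn_ips(fqdn, opts):
        sockets = [(socket.AF_INET, '4')]
        if opts.get('ipv6', True):
            sockets.append((socket.AF_INET6, '6'))

        ret = {}
        for family, ipv_num in sockets:
            key = 'fqdn_ip' + ipv_num
            try:
                info = socket.getaddrinfo(fqdn, None, family)
                ret[key] = sorted({item[4][0] for item in info})
            except socket.error:
                ret[key] = []
        return ret

    if __name__ == '__main__':
        print(fqdn_ips('localhost', {'ipv6': False}))  # only fqdn_ip4 is populated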
@@ -151,7 +151,7 @@ def resolve_dns(opts, fallback=True):
            if opts['master'] == '':
                raise SaltSystemExit
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], opts['master_port'], True, opts['ipv6'])
                salt.utils.dns_check(opts['master'], int(opts['master_port']), True, opts['ipv6'])
        except SaltClientError:
            if opts['retry_dns']:
                while True:

@@ -165,7 +165,7 @@ def resolve_dns(opts, fallback=True):
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], opts['master_port'], True, opts['ipv6']
                            opts['master'], int(opts['master_port']), True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
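The config hunk earlier widens master_port to accept either a string or an integer, and the two resolve_dns hunks above cast it with int() before use. A small illustrative sketch of why the cast matters; plain Python, no Salt imports, and the opts dicts are made-up examples:

    # Illustrative sketch: 'master_port' may now arrive as a string (e.g. from
    # YAML '4506') or an int, so callers normalise it with int() before using
    # it numerically. int() raises on anything else, mirroring a bad config.
    def master_address(opts):
        port = int(opts['master_port'])
        return '{0}:{1}'.format(opts['master'], port)

    print(master_address({'master': 'salt.example.com', 'master_port': 4506}))
    print(master_address({'master': 'salt.example.com', 'master_port': '4506'}))
    # Both print: salt.example.com:4506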
@@ -672,6 +672,7 @@ class SMinion(MinionBase):
        # If configured, cache pillar data on the minion
        if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
            import yaml
            from salt.utils.yamldumper import SafeOrderedDumper
            pdir = os.path.join(self.opts['cachedir'], 'pillar')
            if not os.path.isdir(pdir):
                os.makedirs(pdir, 0o700)

@@ -682,11 +683,21 @@ class SMinion(MinionBase):
            penv = 'base'
            cache_top = {penv: {self.opts['id']: ['cache']}}
            with salt.utils.fopen(ptop, 'wb') as fp_:
                fp_.write(yaml.dump(cache_top))
                fp_.write(
                    yaml.dump(
                        cache_top,
                        Dumper=SafeOrderedDumper
                    )
                )
            os.chmod(ptop, 0o600)
            cache_sls = os.path.join(pdir, 'cache.sls')
            with salt.utils.fopen(cache_sls, 'wb') as fp_:
                fp_.write(yaml.dump(self.opts['pillar']))
                fp_.write(
                    yaml.dump(
                        self.opts['pillar'],
                        Dumper=SafeOrderedDumper
                    )
                )
            os.chmod(cache_sls, 0o600)

    def gen_modules(self, initial_load=False):
@@ -1990,23 +2001,27 @@ class Minion(MinionBase):
            if self.connected:
                # we are not connected anymore
                self.connected = False
                # modify the scheduled job to fire only on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)

                log.info('Connection to master {0} lost'.format(self.opts['master']))

                if self.opts['master_type'] == 'failover':
                if self.opts['master_type'] != 'failover':
                    # modify the scheduled job to fire on reconnect
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': self.opts['master_alive_interval'],
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': False}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)
                else:
                    # delete the scheduled job to don't interfere with the failover process
                    if self.opts['transport'] != 'tcp':
                        self.schedule.delete_job(name=master_event(type='alive'))

                    log.info('Trying to tune in to next master from master-list')

                    if hasattr(self, 'pub_channel'):

@@ -2076,7 +2091,10 @@ class Minion(MinionBase):

            elif tag.startswith(master_event(type='connected')):
                # handle this event only once. otherwise it will pollute the log
                if not self.connected:
                # also if master type is failover all the reconnection work is done
                # by `disconnected` event handler and this event must never happen,
                # anyway check it to be sure
                if not self.connected and self.opts['master_type'] != 'failover':
                    log.info('Connection to master {0} re-established'.format(self.opts['master']))
                    self.connected = True
                    # modify the __master_alive job to only fire,
@@ -767,12 +767,13 @@ def install(name=None,
    env = _parse_env(kwargs.get('env'))
    env.update(DPKG_ENV_VARS.copy())

    state = get_selections(state='hold')
    hold_pkgs = state.get('hold')
    to_unhold = []
    for _pkg in hold_pkgs:
        if _pkg in all_pkgs:
            to_unhold.append(_pkg)
    hold_pkgs = get_selections(state='hold').get('hold', [])
    # all_pkgs contains the argument to be passed to apt-get install, which
    # when a specific version is requested will be in the format name=version.
    # Strip off the '=' if present so we can compare the held package names
    # against the pacakges we are trying to install.
    targeted_names = [x.split('=')[0] for x in all_pkgs]
    to_unhold = [x for x in hold_pkgs if x in targeted_names]

    if to_unhold:
        unhold(pkgs=to_unhold)
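A tiny standalone illustration of the aptpkg refactor above: the held-package check now strips any name=version pin from the install targets before comparing names. The sample package lists are made up:

    # Standalone illustration of the comparison logic in the hunk above.
    all_pkgs = ['vim', 'curl=7.52.1-5', 'nginx']
    hold_pkgs = ['curl', 'openssh-server']

    targeted_names = [x.split('=')[0] for x in all_pkgs]
    to_unhold = [x for x in hold_pkgs if x in targeted_names]

    print(to_unhold)  # ['curl'] - held and about to be (re)installed, so unhold it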
@@ -24,7 +24,7 @@ import logging
import yaml

# Import salt libs
from salt.utils.yamldumper import OrderedDumper
from salt.utils.yamldumper import SafeOrderedDumper

# Define the module's virtual name
__virtualname__ = 'yaml'

@@ -41,7 +41,7 @@ def output(data, **kwargs):  # pylint: disable=unused-argument
    Print out YAML using the block mode
    '''

    params = dict(Dumper=OrderedDumper)
    params = dict(Dumper=SafeOrderedDumper)
    if 'output_indent' not in __opts__:
        # default indentation
        params.update(default_flow_style=False)
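Many hunks in this merge swap a plain yaml.dump or OrderedDumper for SafeOrderedDumper from salt.utils.yamldumper. A hedged sketch of the difference, assuming a Salt checkout is importable: the safe ordered dumper should keep dictionary insertion order in the output while refusing to serialise arbitrary Python objects as !!python/object tags.

    # Hedged sketch (requires Salt importable): dump an OrderedDict with the
    # safe ordered dumper so key order is preserved and only plain YAML types
    # are emitted.
    from collections import OrderedDict

    import yaml
    from salt.utils.yamldumper import SafeOrderedDumper

    data = OrderedDict([('zebra', 1), ('apple', 2)])
    print(yaml.dump(data, Dumper=SafeOrderedDumper, default_flow_style=False))
    # zebra: 1
    # apple: 2   <- insertion order kept, no !!python/object/... tags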
@@ -126,10 +126,11 @@ from __future__ import absolute_import
import logging
import re
import yaml
import salt.utils.minions

from salt.exceptions import CommandExecutionError
from salt.utils.dictupdate import update as dict_merge
import salt.utils.minions
from salt.utils.yamlloader import SaltYamlSafeLoader

# Import third party libs
try:

@@ -251,7 +252,10 @@ def pillar_format(ret, keys, value):
    # If value is not None then it's a string
    # Use YAML to parse the data
    # YAML strips whitespaces unless they're surrounded by quotes
    pillar_value = yaml.load(value)
    pillar_value = yaml.load(
        value,
        Loader=SaltYamlSafeLoader
    )

    keyvalue = keys.pop()
    pil = {keyvalue: pillar_value}
@@ -279,18 +279,19 @@ import jinja2
import re
from os.path import isfile, join

# Import 3rd-party libs
# Import Salt libs
import salt.ext.six as six
from salt.ext.six.moves import input  # pylint: disable=import-error,redefined-builtin
import salt.utils
from salt.utils.yamlloader import SaltYamlSafeLoader

# Import 3rd-party libs
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False

# Import Salt libs
import salt.utils

# Only used when called from a terminal
log = None

@@ -436,7 +437,10 @@ def ext_pillar(minion_id, pillar, resource, sequence, subkey=False, subkey_only=
        data['grains'] = __grains__.copy()
        data['pillar'] = pillar.copy()
        results_jinja = template.render(data)
        results = yaml.load(results_jinja)
        results = yaml.load(
            results_jinja,
            Loader=SaltYamlSafeLoader
        )
    except jinja2.UndefinedError as err:
        log.error('Failed to parse JINJA template: {0}\n{1}'.format(fn, err))
    except yaml.YAMLError as err:

@@ -529,7 +533,10 @@ def validate(output, resource):
        data = output
        data['grains'] = __grains__.copy()
        data['pillar'] = __pillar__.copy()
        schema = yaml.load(template.render(data))
        schema = yaml.load(
            template.render(data),
            Loader=SaltYamlSafeLoader
        )
        all_schemas.update(schema)
        pepa_schemas.append(fn)
@@ -551,7 +558,12 @@ if __name__ == '__main__':

    # Get configuration
    with salt.utils.fopen(args.config) as fh_:
        __opts__.update(yaml.load(fh_.read()))
        __opts__.update(
            yaml.load(
                fh_.read(),
                Loader=SaltYamlSafeLoader
            )
        )

    loc = 0
    for name in [next(iter(list(e.keys()))) for e in __opts__['ext_pillar']]:

@@ -564,14 +576,24 @@ if __name__ == '__main__':
    if 'pepa_grains' in __opts__:
        __grains__ = __opts__['pepa_grains']
    if args.grains:
        __grains__.update(yaml.load(args.grains))
        __grains__.update(
            yaml.load(
                args.grains,
                Loader=SaltYamlSafeLoader
            )
        )

    # Get pillars
    __pillar__ = {}
    if 'pepa_pillar' in __opts__:
        __pillar__ = __opts__['pepa_pillar']
    if args.pillar:
        __pillar__.update(yaml.load(args.pillar))
        __pillar__.update(
            yaml.load(
                args.pillar,
                Loader=SaltYamlSafeLoader
            )
        )

    # Validate or not
    if args.validate:

@@ -607,7 +629,6 @@ if __name__ == '__main__':
        raise RuntimeError('Failed to get Grains from SaltStack REST API')

    __grains__ = result[args.hostname]
    # print yaml.safe_dump(__grains__, indent=4, default_flow_style=False)

    # Print results
    ex_subkey = False
@@ -91,6 +91,7 @@ import salt.ext.six.moves.http_client
# Import Salt Libs
import salt.returners
import salt.utils.slack
from salt.utils.yamldumper import SafeOrderedDumper

log = logging.getLogger(__name__)

@@ -211,7 +212,7 @@ def returner(ret):
    returns = dict((key, value) for key, value in returns.items() if value['result'] is not True or value['changes'])

    if yaml_format is True:
        returns = yaml.dump(returns)
        returns = yaml.dump(returns, Dumper=SafeOrderedDumper)
    else:
        returns = pprint.pformat(returns)
@@ -12,7 +12,6 @@ import yaml
import tarfile
import shutil
import msgpack
import datetime
import hashlib
import logging
import pwd

@@ -30,9 +29,9 @@ import salt.syspaths as syspaths
import salt.ext.six as six
from salt.ext.six import string_types
from salt.ext.six.moves import input
from salt.ext.six.moves import zip
from salt.ext.six.moves import filter
from salt.template import compile_template
from salt.utils.yamldumper import SafeOrderedDumper

# Get logging started
log = logging.getLogger(__name__)
@@ -746,7 +745,14 @@ class SPMClient(object):

        metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
        with salt.utils.fopen(metadata_filename, 'w') as mfh:
            yaml.dump(repo_metadata, mfh, indent=4, canonical=False, default_flow_style=False)
            yaml.dump(
                repo_metadata,
                mfh,
                indent=4,
                canonical=False,
                default_flow_style=False,
                Dumper=SafeOrderedDumper
            )

        log.debug('Wrote {0}'.format(metadata_filename))
@@ -58,6 +58,7 @@ import yaml
# Import Salt Libs
import salt.ext.six as six
import salt.utils
from salt.utils.yamlloader import SaltYamlSafeLoader

log = logging.getLogger(__name__)

@@ -713,7 +714,10 @@ class _Swagger(object):
                                          error_response_template,
                                          response_template)
            with salt.utils.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = yaml.load(sf)
                self._cfg = yaml.load(
                    sf,
                    Loader=SaltYamlSafeLoader
                )
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
@@ -241,7 +241,7 @@ class SaltNova(object):
                os_auth_plugin=os_auth_plugin,
                **kwargs)

    def _new_init(self, username, project_id, auth_url, region_name, password, os_auth_plugin, auth=None, **kwargs):
    def _new_init(self, username, project_id, auth_url, region_name, password, os_auth_plugin, auth=None, verify=True, **kwargs):
        if auth is None:
            auth = {}

@@ -281,7 +281,7 @@ class SaltNova(object):

        self.client_kwargs = sanatize_novaclient(self.client_kwargs)
        options = loader.load_from_options(**self.kwargs)
        self.session = keystoneauth1.session.Session(auth=options)
        self.session = keystoneauth1.session.Session(auth=options, verify=verify)
        conn = client.Client(version=self.version, session=self.session, **self.client_kwargs)
        self.kwargs['auth_token'] = conn.client.session.get_token()
        self.catalog = conn.client.session.get('/auth/catalog', endpoint_filter={'service_type': 'identity'}).json().get('catalog', [])
@@ -41,6 +41,7 @@ from salt.utils.verify import verify_files
import salt.exceptions
import salt.ext.six as six
from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin
from salt.utils.yamldumper import SafeOrderedDumper


def _sorted(mixins_or_funcs):
@@ -2011,7 +2012,13 @@ class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta,
        # Dump the master configuration file, exit normally at the end.
        if self.options.config_dump:
            cfg = config.master_config(self.get_config_file_path())
            sys.stdout.write(yaml.dump(cfg, default_flow_style=False))
            sys.stdout.write(
                yaml.dump(
                    cfg,
                    default_flow_style=False,
                    Dumper=SafeOrderedDumper
                )
            )
            sys.exit(salt.defaults.exitcodes.EX_OK)

        if self.options.preview_target:
@@ -349,6 +349,7 @@ import salt.log.setup as log_setup
import salt.defaults.exitcodes
from salt.utils.odict import OrderedDict
from salt.utils.process import os_is_running, default_signals, SignalHandlingMultiprocessingProcess
from salt.utils.yamldumper import SafeOrderedDumper

# Import 3rd-party libs
import salt.ext.six as six

@@ -473,7 +474,10 @@ class Schedule(object):
            with salt.utils.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.to_bytes(
                        yaml.dump({'schedule': self._get_schedule(include_pillar=False)})
                        yaml.dump(
                            {'schedule': self._get_schedule(include_pillar=False)},
                            Dumper=SafeOrderedDumper
                        )
                    )
                )
        except (IOError, OSError):
@@ -13,6 +13,7 @@ import yaml

# Import salt libs
import salt.config
from salt.utils.yamldumper import SafeOrderedDumper

log = logging.getLogger(__name__)

@@ -39,7 +40,13 @@ def apply(key, value):
    data = values()
    data[key] = value
    with salt.utils.fopen(path, 'w+') as fp_:
        fp_.write(yaml.dump(data, default_flow_style=False))
        fp_.write(
            yaml.dump(
                data,
                default_flow_style=False,
                Dumper=SafeOrderedDumper
            )
        )


def update_config(file_name, yaml_contents):