Mirror of https://github.com/saltstack/salt.git
Commit 1bbe45538e: Merge remote-tracking branch 'upstream/2015.8' into merge-forward-develop

Conflicts:
    salt/cloud/__init__.py
    salt/modules/smartos_vmadm.py
    salt/utils/s3.py
    setup.py

39 changed files with 3374 additions and 730 deletions.
    doc/_static/proxy_minions.drawio.xml (vendored, 3 lines changed; diff suppressed because one or more lines are too long)
    doc/_static/proxy_minions.png (vendored, binary file not shown; before: 118 KiB, after: 113 KiB)
    doc/_static/proxy_minions.svg (vendored, 2 lines changed; diff suppressed because one or more lines are too long; 109 KiB before and after)
    (one additional diff suppressed because it is too large)
@@ -77,6 +77,21 @@ Optional Fields
 ```````````````
 The following fields may also be present.
 
+top_level_dir
+~~~~~~~~~~~~~
+This field is optional, but highly recommended. If it is not specified, the
+package name will be used.
+
+Formula repositories typically do not store ``.sls`` files in the root of the
+repository; instead they are stored in a subdirectory. For instance, an
+``apache-formula`` repository would contain a directory called ``apache``, which
+would contain an ``init.sls``, plus a number of other related files. In this
+instance, the ``top_level_dir`` should be set to ``apache``.
+
+Files outside the ``top_level_dir``, such as ``README.rst``, ``FORMULA``, and
+``LICENSE`` will not be installed. The exceptions to this rule are files that
+are already treated specially, such as ``pillar.example`` and ``_modules/``.
+
 dependencies
 ~~~~~~~~~~~~
 A list of packages which must be installed before this package can function.
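A quick illustration of the ``top_level_dir`` rule described above (a minimal sketch; the helper name ``should_install`` is hypothetical, while the actual behavior is implemented in ``salt/spm/pkgfiles/local.py`` further down in this commit):

.. code-block:: python

    def should_install(member_name, package, top_level_dir=None):
        # Default the top-level directory to the package name, as described above.
        tld = top_level_dir or package
        # Tarball members are prefixed with "<package>/"; strip that prefix.
        relative = member_name.replace('{0}/'.format(package), '', 1)
        # Specially treated files (_modules/, _states/, pillar.example) always install.
        if relative.startswith('_') or relative == 'pillar.example':
            return True
        # Everything else must live under the top-level directory.
        return relative.startswith(tld)

    # README.rst sits outside the top-level dir, so it is skipped:
    print(should_install('apache-formula/README.rst', 'apache-formula', 'apache'))       # False
    print(should_install('apache-formula/apache/init.sls', 'apache-formula', 'apache'))  # True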
@@ -166,7 +181,7 @@ Pillars
 =======
 Formula packages include a pillar.example file. Rather than being placed in the
 formula directory, this file is renamed to ``<formula name>.sls.orig`` and
-placed in the ``pillar_roots``, where it can be easily updated to meet the
+placed in the ``pillar_path``, where it can be easily updated to meet the
 user's needs.
 
 Loader Modules
@@ -275,11 +290,16 @@ treated as if they have a ``-formula`` name.
 
 formula
 -------
-By default, most files from this type of package live in the ``/srv/salt/``
+By default, most files from this type of package live in the ``/srv/spm/salt/``
 directory. The exception is the ``pillar.example`` file, which will be renamed
-to ``<package_name>.sls`` and placed in the pillar directory (``/srv/pillar/``
+to ``<package_name>.sls`` and placed in the pillar directory (``/srv/spm/pillar/``
 by default).
 
+reactor
+-------
+By default, files from this type of package live in the ``/srv/spm/reactor/``
+directory.
+
 conf
 ----
 The files in this type of package are configuration files for Salt, which
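The default destinations described above correspond to the ``formula_path``, ``pillar_path``, and ``reactor_path`` settings added to ``DEFAULT_SPM_OPTS`` in ``salt/config.py`` later in this commit. A hedged sketch of the mapping (the helper name ``install_root`` is only illustrative):

.. code-block:: python

    # Default destinations described above (overridable via the SPM config).
    SPM_PATHS = {
        'formula': '/srv/spm/salt',
        'pillar': '/srv/spm/pillar',
        'reactor': '/srv/spm/reactor',
        'conf': '/etc/salt',   # salt.syspaths.CONFIG_DIR on a default install
    }

    def install_root(package_name):
        for suffix in ('reactor', 'conf'):
            if package_name.endswith('-' + suffix):
                return SPM_PATHS[suffix]
        # Anything without a recognized suffix is treated as a formula.
        return SPM_PATHS['formula']

    print(install_root('apache-formula'))  # /srv/spm/salt
    print(install_root('mypkg-reactor'))   # /srv/spm/reactor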
@@ -38,7 +38,7 @@ class SSHState(salt.state.State):
         self.functions = self.wrapper
         self.utils = salt.loader.utils(self.opts)
         locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
-        self.states = salt.loader.states(self.opts, locals_)
+        self.states = salt.loader.states(self.opts, locals_, self.utils)
         self.rend = salt.loader.render(self.opts, self.functions)
 
     def check_refresh(self, data, ret):
@@ -81,7 +81,8 @@ def enter_mainloop(target,
                    pool_size=None,
                    callback=None,
                    queue=None):
-    '''Manage a multiprocessing pool
+    '''
+    Manage a multiprocessing pool
 
     - If the queue does not output anything, the pool runs indefinitely
 
@@ -1178,12 +1179,11 @@ class Cloud(object):
             )
 
         if deploy:
-            if make_master is False and 'master' not in minion_dict:
-                raise SaltCloudConfigError(
-                    (
-                        'There\'s no master defined on the '
-                        '\'{0}\' VM settings'
-                    ).format(vm_['name'])
+            if not make_master and 'master' not in minion_dict:
+                log.warn(
+                    'There\'s no master defined on the {0!r} VM settings.'.format(
+                        vm_['name']
+                    )
                 )
 
             if 'pub_key' not in vm_ and 'priv_key' not in vm_:
@@ -1036,6 +1036,41 @@ def list_nodes_full(call=None):
     return _list_linodes(full=True)
 
 
+def list_nodes_min(call=None):
+    '''
+    Return a list of the VMs that are on the provider. Only a list of VM names and
+    their state is returned. This is the minimum amount of information needed to
+    check for existing VMs.
+
+    .. versionadded:: 2015.8.0
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f list_nodes_min my-linode-config
+        salt-cloud --function list_nodes_min my-linode-config
+    '''
+    if call == 'action':
+        raise SaltCloudSystemExit(
+            'The list_nodes_min function must be called with -f or --function.'
+        )
+
+    ret = {}
+    nodes = _query('linode', 'list')['DATA']
+
+    for node in nodes:
+        name = node['LABEL']
+        this_node = {
+            'id': str(node['LINODEID']),
+            'state': _get_status_descr_by_id(int(node['STATUS']))
+        }
+
+        ret[name] = this_node
+
+    return ret
+
+
 def reboot(name, call=None):
     '''
     Reboot a linode.
@@ -1304,28 +1339,6 @@ def _list_linodes(full=False):
     return ret
 
 
-def _check_and_set_node_id(name, linode_id):
-    '''
-    Helper function that checks against name and linode_id collisions and returns a node_id.
-    '''
-    node_id = ''
-    if linode_id and name is None:
-        node_id = linode_id
-    elif name:
-        node_id = get_linode_id_from_name(name)
-
-    if linode_id and (linode_id != node_id):
-        raise SaltCloudException(
-            'A name and a linode_id were provided, but the provided linode_id, {0}, '
-            'does not match the linode_id found for the provided '
-            'name: \'{1}\': \'{2}\'. Nothing was done.'.format(
-                linode_id, name, node_id
-            )
-        )
-
-    return node_id
-
-
 def _query(action=None,
            command=None,
            args=None,
@@ -155,6 +155,9 @@ def __virtual__():
     '''
     request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper()))
 
+    if get_configured_provider() is False:
+        return False
+
     if get_dependencies() is False:
         return False
 
@@ -167,7 +170,8 @@ def get_configured_provider():
     '''
    return config.is_provider_configured(
         __opts__,
-        __active_provider_name__ or __virtualname__
+        __active_provider_name__ or __virtualname__,
+        ('user', 'tenant', 'identity_url', 'compute_region',)
     )
 
 
@@ -25,11 +25,14 @@ from salt.utils.openstack import pyrax as suop
 
 __virtualname__ = 'pyrax'
 
 
-# Only load this module if the OPENSTACK configuration is in place
+# Only load this module if the PYRAX configuration is in place
 def __virtual__():
     '''
-    Check for Nova configurations
+    Check for Pyrax configurations
     '''
+    if get_configured_provider() is False:
+        return False
+
     if get_dependencies() is False:
         return False
 
@@ -42,7 +45,8 @@ def get_configured_provider():
     '''
     return config.is_provider_configured(
         __opts__,
-        __active_provider_name__ or 'pyrax'
+        __active_provider_name__ or __virtualname__,
+        ('username', 'identity_url', 'compute_region',)
     )
 
 
@@ -138,6 +138,12 @@ def __virtual__():
     if get_dependencies() is False:
         return False
 
+    warn_until(
+        'Carbon',
+        'The vsphere driver is deprecated in favor of the vmware driver and will be removed '
+        'in Salt Carbon. Please convert your vsphere provider configs to use the vmware driver.'
+    )
+
     return __virtualname__
 
 
@@ -145,11 +151,6 @@ def get_configured_provider():
     '''
     Return the first configured instance.
     '''
-    warn_until(
-        'Carbon',
-        'The vsphere driver is deprecated in favor of the vmware driver and will be removed '
-        'in Salt Carbon. Please convert your vsphere provider configs to use the vmware driver.'
-    )
     return config.is_provider_configured(
         __opts__,
         __active_provider_name__ or __virtualname__,
@@ -372,7 +372,14 @@ def destroy(name, conn=None, call=None):
         transport=__opts__['transport']
     )
     if __opts__['delete_sshkeys'] is True:
-        salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0])
+        public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
+        if public_ips:
+            salt.utils.cloud.remove_sshkey(public_ips[0])
+
+        private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
+        if private_ips:
+            salt.utils.cloud.remove_sshkey(private_ips[0])
+
     if __opts__.get('update_cachedir', False) is True:
         salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
 
@@ -590,8 +590,10 @@ VALID_OPTS = {
     'winrepo_source_dir': str,
 
     'winrepo_dir': str,
+    'winrepo_dir_ng': str,
     'winrepo_cachefile': str,
     'winrepo_remotes': list,
+    'winrepo_remotes_ng': list,
     'winrepo_branch': str,
     'winrepo_ssl_verify': bool,
     'winrepo_user': str,
@@ -782,7 +784,8 @@ DEFAULT_MINION_OPTS = {
     'file_client': 'remote',
     'use_master_when_local': False,
     'file_roots': {
-        'base': [salt.syspaths.BASE_FILE_ROOTS_DIR],
+        'base': [salt.syspaths.BASE_FILE_ROOTS_DIR,
+                 salt.syspaths.SPM_FORMULA_PATH]
     },
     'top_file_merging_strategy': 'merge',
     'env_order': [],
@@ -796,7 +799,8 @@ DEFAULT_MINION_OPTS = {
     'fileserver_followsymlinks': True,
     'fileserver_ignoresymlinks': False,
     'pillar_roots': {
-        'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR],
+        'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR,
+                 salt.syspaths.SPM_PILLAR_PATH]
     },
     'git_pillar_base': 'master',
     'git_pillar_branch': 'master',
@@ -881,10 +885,20 @@ DEFAULT_MINION_OPTS = {
     'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'),
     'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'),
     'random_reauth_delay': 10,
-    'winrepo_source_dir': 'salt://win/repo/',
+    'winrepo_source_dir': 'salt://win/repo-ng/',
     'winrepo_dir': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo'),
+    'winrepo_dir_ng': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo-ng'),
     'winrepo_cachefile': 'winrepo.p',
     'winrepo_remotes': ['https://github.com/saltstack/salt-winrepo.git'],
+    'winrepo_remotes_ng': ['https://github.com/saltstack/salt-winrepo-ng.git'],
+    'winrepo_branch': 'master',
+    'winrepo_ssl_verify': False,
+    'winrepo_user': '',
+    'winrepo_password': '',
+    'winrepo_insecure_auth': False,
+    'winrepo_privkey': '',
+    'winrepo_pubkey': '',
+    'winrepo_passphrase': '',
     'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-minion.pid'),
     'range_server': 'range:80',
     'tcp_keepalive': True,
@@ -944,13 +958,15 @@ DEFAULT_MASTER_OPTS = {
     'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'master'),
     'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'master'),
     'file_roots': {
-        'base': [salt.syspaths.BASE_FILE_ROOTS_DIR],
+        'base': [salt.syspaths.BASE_FILE_ROOTS_DIR,
+                 salt.syspaths.SPM_FORMULA_PATH]
     },
     'master_roots': {
         'base': [salt.syspaths.BASE_MASTER_ROOTS_DIR],
     },
     'pillar_roots': {
-        'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR],
+        'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR,
+                 salt.syspaths.SPM_PILLAR_PATH]
     },
     'top_file_merging_strategy': 'merge',
     'env_order': [],
@@ -1089,8 +1105,10 @@ DEFAULT_MASTER_OPTS = {
     'permissive_pki_access': False,
     'default_include': 'master.d/*.conf',
     'winrepo_dir': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo'),
+    'winrepo_dir_ng': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo-ng'),
     'winrepo_cachefile': 'winrepo.p',
     'winrepo_remotes': ['https://github.com/saltstack/salt-winrepo.git'],
+    'winrepo_remotes_ng': ['https://github.com/saltstack/salt-winrepo-ng.git'],
     'winrepo_branch': 'master',
     'winrepo_ssl_verify': False,
     'winrepo_user': '',
@@ -1198,13 +1216,15 @@ DEFAULT_API_OPTS = {
 
 DEFAULT_SPM_OPTS = {
     # ----- Salt master settings overridden by SPM --------------------->
-    'reactor_roots': '/srv/reactor',
+    'formula_path': '/srv/spm/salt',
+    'pillar_path': '/srv/spm/pillar',
+    'reactor_path': '/srv/spm/reactor',
     'spm_logfile': '/var/log/salt/spm',
     # spm_repos_config also includes a .d/ directory
     'spm_repos_config': '/etc/salt/spm.repos',
     'spm_cache_dir': os.path.join(salt.syspaths.CACHE_DIR, 'spm'),
-    'spm_build_dir': '/srv/spm',
-    'spm_build_exclude': ['.git'],
+    'spm_build_dir': '/srv/spm_build',
+    'spm_build_exclude': ['CVS', '.hg', '.git', '.svn'],
     'spm_db': os.path.join(salt.syspaths.CACHE_DIR, 'spm', 'packages.db'),
     # <---- Salt master settings overridden by SPM ----------------------
 }
@@ -72,7 +72,7 @@ def _freebsd_disks():
     devices = __salt__['cmd.run']('{0} -n kern.disks'.format(sysctl))
     SSD_TOKEN = 'non-rotating'
 
-    for device in devices.split(' '):
+    for device in devices.split(' ')[1:]:
         if device.startswith('cd'):
             log.debug('Disk grain skipping cd')
         elif _freebsd_vbox():
@@ -388,7 +388,7 @@ def roster(opts, whitelist=None):
                       whitelist=whitelist)
 
 
-def states(opts, functions, whitelist=None):
+def states(opts, functions, utils, whitelist=None):
     '''
     Returns the state modules
 
@@ -402,13 +402,16 @@ def states(opts, functions, whitelist=None):
         import salt.loader
 
         __opts__ = salt.config.minion_config('/etc/salt/minion')
-        statemods = salt.loader.states(__opts__, None)
+        statemods = salt.loader.states(__opts__, None, None)
     '''
-    return LazyLoader(_module_dirs(opts, 'states', 'states'),
+    ret = LazyLoader(_module_dirs(opts, 'states', 'states'),
                       opts,
                       tag='states',
                       pack={'__salt__': functions},
                       whitelist=whitelist)
+    ret.pack['__states__'] = ret
+    ret.pack['__utils__'] = utils
+    return ret
 
 
 def beacons(opts, functions, context=None):
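The change above packs ``__states__`` and ``__utils__`` into every loaded state module, which is why the callers elsewhere in this commit now pass a ``utils`` argument to ``salt.loader.states()``. A hedged sketch of what a custom state module can now do (the module and utility names here are illustrative, not part of Salt):

.. code-block:: python

    # _states/myapp.py -- illustrative custom state module

    def configured(name):
        ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
        # __utils__ is now packed by salt.loader.states(), so utility modules
        # under _utils/ can be called directly from state code.
        checksum = __utils__['myutils.hash_config'](name)
        ret['comment'] = 'config hash {0}'.format(checksum)
        # __states__ likewise lets one state module call another.
        return ret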
@@ -451,10 +454,6 @@ def log_handlers(opts):
 
     :param dict opts: The Salt options dictionary
     '''
-    pack = {
-        '__grains__': grains(opts),
-        '__salt__': minion_mods(opts)
-    }
     ret = LazyLoader(_module_dirs(opts,
                                   'log_handlers',
                                   'log_handlers',
@@ -462,7 +461,6 @@ def log_handlers(opts):
                                   base_path=os.path.join(SALT_BASE_PATH, 'log')),
                      opts,
                      tag='log_handlers',
-                     pack=pack
                      )
     return FilterDictWrapper(ret, '.setup_handlers')
 
@@ -81,6 +81,7 @@ from __future__ import absolute_import
 import logging
 
 # Import salt libs
+import salt.loader
 from salt.log import LOG_LEVELS
 
 # Import 3rd party libs
@@ -92,6 +93,8 @@ except ImportError:
     HAS_RAVEN = False
 
 log = logging.getLogger(__name__)
+__grains__ = {}
+__salt__ = {}
 
 # Define the module's virtual name
 __virtualname__ = 'sentry'
@@ -99,6 +102,8 @@ __virtualname__ = 'sentry'
 
 def __virtual__():
     if HAS_RAVEN is True:
+        __grains__ = salt.loader.grains(__opts__)
+        __salt__ = salt.loader.minion_mods(__opts__)
         return __virtualname__
     return False
 
@@ -534,7 +534,7 @@ class SMinion(MinionBase):
         # TODO: remove
         self.function_errors = {}  # Keep the funcs clean
         self.returners = salt.loader.returners(self.opts, self.functions)
-        self.states = salt.loader.states(self.opts, self.functions)
+        self.states = salt.loader.states(self.opts, self.functions, self.utils)
         self.rend = salt.loader.render(self.opts, self.functions)
         self.matcher = Matcher(self.opts, self.functions)
         self.functions['sys.reload_modules'] = self.gen_modules
@@ -578,7 +578,9 @@ class MasterMinion(object):
         if self.mk_returners:
             self.returners = salt.loader.returners(self.opts, self.functions)
         if self.mk_states:
-            self.states = salt.loader.states(self.opts, self.functions)
+            self.states = salt.loader.states(self.opts,
+                                             self.functions,
+                                             self.utils)
         if self.mk_rend:
             self.rend = salt.loader.render(self.opts, self.functions)
         if self.mk_matcher:
@ -96,16 +96,16 @@ def _create_pbuilders(env):
|
|||
set -e
|
||||
cat > "/etc/apt/preferences" << EOF
|
||||
|
||||
Package: python-abalaster
|
||||
Package: python-alabaster
|
||||
Pin: release a=testing
|
||||
Pin-Priority: 950
|
||||
|
||||
Package: python-sphinx
|
||||
Pin: release a=experimental
|
||||
Pin: release a=testing
|
||||
Pin-Priority: 900
|
||||
|
||||
Package: sphinx-common
|
||||
Pin: release a=experimental
|
||||
Pin: release a=testing
|
||||
Pin-Priority: 900
|
||||
|
||||
Package: *
|
||||
|
@ -139,7 +139,7 @@ if [ -n "${DIST}" ]; then
|
|||
APTCACHE="/var/cache/pbuilder/$DIST/aptcache"
|
||||
fi
|
||||
HOOKDIR="${HOME}/.pbuilder-hooks"
|
||||
OTHERMIRROR="deb http://ftp.us.debian.org/debian/ testing main contrib non-free | deb http://ftp.us.debian.org/debian/ experimental main contrib non-free"
|
||||
OTHERMIRROR="deb http://ftp.us.debian.org/debian/ testing main contrib non-free | deb http://ftp.us.debian.org/debian/ unstable main contrib non-free | deb http://ftp.us.debian.org/debian/ experimental main contrib non-free"
|
||||
'''
|
||||
home = os.path.expanduser('~')
|
||||
pbuilder_hooksdir = os.path.join(home, '.pbuilder-hooks')
|
||||
|
@ -244,7 +244,8 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
|
|||
|
||||
frontname = salttarball.split('.tar.gz')
|
||||
salttar_name = frontname[0]
|
||||
debname = salttar_name.replace('-', '_')
|
||||
k = salttar_name.rfind('-')
|
||||
debname = salttar_name[:k] + '_' + salttar_name[k+1:]
|
||||
debname += '+ds'
|
||||
debname_orig = debname + '.orig.tar.gz'
|
||||
abspath_debname = os.path.join(tree_base, debname)
|
||||
|
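The replacement above switches the Debian source name computation from a blanket ``replace('-', '_')`` to splitting only at the final hyphen before appending ``+ds``. A standalone sketch with an illustrative tarball name:

.. code-block:: python

    salttar_name = 'salt-2015.8.0'               # illustrative tarball base name
    k = salttar_name.rfind('-')                  # index of the final hyphen
    debname = salttar_name[:k] + '_' + salttar_name[k + 1:]
    debname += '+ds'
    print(debname)                               # salt_2015.8.0+ds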
@ -270,12 +271,11 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
|
|||
__salt__['cmd.run'](cmd)
|
||||
|
||||
for dfile in os.listdir(tree_base):
|
||||
if dfile.startswith('salt_'):
|
||||
if not dfile.endswith('.build'):
|
||||
full = os.path.join(tree_base, dfile)
|
||||
trgt = os.path.join(dest_dir, dfile)
|
||||
shutil.copy(full, trgt)
|
||||
ret.append(trgt)
|
||||
if not dfile.endswith('.build'):
|
||||
full = os.path.join(tree_base, dfile)
|
||||
trgt = os.path.join(dest_dir, dfile)
|
||||
shutil.copy(full, trgt)
|
||||
ret.append(trgt)
|
||||
|
||||
return ret
|
||||
|
||||
|
@ -324,12 +324,8 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='bas
|
|||
|
||||
for bfile in os.listdir(results_dir):
|
||||
full = os.path.join(results_dir, bfile)
|
||||
if bfile.endswith('.deb'):
|
||||
bdist = os.path.join(dest_dir, bfile)
|
||||
shutil.copy(full, bdist)
|
||||
else:
|
||||
with salt.utils.fopen(full, 'r') as fp_:
|
||||
ret[bfile] = fp_.read()
|
||||
bdist = os.path.join(dest_dir, bfile)
|
||||
shutil.copy(full, bdist)
|
||||
shutil.rmtree(results_dir)
|
||||
shutil.rmtree(dsc_dir)
|
||||
return ret
|
||||
|
@ -362,7 +358,7 @@ Pull: jessie
|
|||
|
||||
if keyid is not None:
|
||||
with salt.utils.fopen(repoconfdist, 'a') as fow:
|
||||
fow.write('Signwith: {0}\n'.format(keyid))
|
||||
fow.write('SignWith: {0}\n'.format(keyid))
|
||||
|
||||
repocfg_opts = _get_repo_env(env)
|
||||
repoconfopts = os.path.join(repoconf, 'options')
|
||||
|
|
|
@ -3646,7 +3646,7 @@ def check_managed(
|
|||
__clean_tmp(sfn)
|
||||
return False, comments
|
||||
changes = check_file_meta(name, sfn, source, source_sum, user,
|
||||
group, mode, saltenv, template, contents)
|
||||
group, mode, saltenv, contents)
|
||||
__clean_tmp(sfn)
|
||||
if changes:
|
||||
log.info(changes)
|
||||
|
@ -3705,7 +3705,7 @@ def check_managed_changes(
|
|||
__clean_tmp(sfn)
|
||||
return False, comments
|
||||
changes = check_file_meta(name, sfn, source, source_sum, user,
|
||||
group, mode, saltenv, template, contents)
|
||||
group, mode, saltenv, contents)
|
||||
__clean_tmp(sfn)
|
||||
return changes
|
||||
|
||||
|
@ -3719,7 +3719,6 @@ def check_file_meta(
|
|||
group,
|
||||
mode,
|
||||
saltenv,
|
||||
template=None,
|
||||
contents=None):
|
||||
'''
|
||||
Check for the changes in the file metadata.
|
||||
|
@ -3734,6 +3733,37 @@ def check_file_meta(
|
|||
|
||||
Supported hash types include sha512, sha384, sha256, sha224, sha1, and
|
||||
md5.
|
||||
|
||||
name
|
||||
Path to file destination
|
||||
|
||||
sfn
|
||||
Template-processed source file contents
|
||||
|
||||
source
|
||||
URL to file source
|
||||
|
||||
source_sum
|
||||
File checksum information as a dictionary
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
{hash_type: md5, hsum: <md5sum>}
|
||||
|
||||
user
|
||||
Destination file user owner
|
||||
|
||||
group
|
||||
Destination file group owner
|
||||
|
||||
mode
|
||||
Destination file permissions mode
|
||||
|
||||
saltenv
|
||||
Salt environment used to resolve source files
|
||||
|
||||
contents
|
||||
File contents
|
||||
'''
|
||||
changes = {}
|
||||
if not source_sum:
|
||||
|
|
|
@ -16,6 +16,7 @@ from distutils.version import LooseVersion as _LooseVersion
|
|||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
from salt.exceptions import SaltInvocationError, CommandExecutionError
|
||||
from salt.ext import six
|
||||
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error
|
||||
|
@ -175,6 +176,20 @@ def _git_run(command, cwd=None, runas=None, identity=None,
|
|||
'''
|
||||
env = {}
|
||||
|
||||
for item in command:
|
||||
try:
|
||||
if '<redacted>' in _remove_sensitive_data(item):
|
||||
loglevel = 'quiet'
|
||||
log.debug(
|
||||
'HTTPS user/password in git command, the command and '
|
||||
'output will be redacted'
|
||||
)
|
||||
break
|
||||
except TypeError:
|
||||
continue
|
||||
else:
|
||||
loglevel = 'debug'
|
||||
|
||||
if identity:
|
||||
stderrs = []
|
||||
|
||||
|
@ -225,6 +240,7 @@ def _git_run(command, cwd=None, runas=None, identity=None,
|
|||
runas=runas,
|
||||
env=env,
|
||||
python_shell=False,
|
||||
output_loglevel=loglevel,
|
||||
ignore_retcode=ignore_retcode,
|
||||
**kwargs)
|
||||
finally:
|
||||
|
@ -249,6 +265,7 @@ def _git_run(command, cwd=None, runas=None, identity=None,
|
|||
runas=runas,
|
||||
env=env,
|
||||
python_shell=False,
|
||||
output_loglevel=loglevel,
|
||||
ignore_retcode=ignore_retcode,
|
||||
**kwargs)
|
||||
|
||||
|
@ -259,7 +276,9 @@ def _git_run(command, cwd=None, runas=None, identity=None,
|
|||
gitcommand = ' '.join(command) \
|
||||
if isinstance(command, list) \
|
||||
else command
|
||||
msg = 'Command \'{0}\' failed'.format(gitcommand)
|
||||
msg = 'Command \'{0}\' failed'.format(
|
||||
_remove_sensitive_data(gitcommand)
|
||||
)
|
||||
if result['stderr']:
|
||||
msg += ': {0}'.format(
|
||||
_remove_sensitive_data(result['stderr'])
|
||||
|
@ -287,8 +306,7 @@ def _remove_sensitive_data(output):
|
|||
# support flags even in Python 2.7.
|
||||
url_re = '(https?)://.*@'
|
||||
redacted = r'\1://<redacted>@'
|
||||
if sys.version_info[0] > 2 \
|
||||
or (sys.version_info[0] == 2 and sys.version_info[1] >= 7):
|
||||
if sys.version_info >= (2, 7):
|
||||
# re.sub() supports flags as of 2.7, use this to do a case-insensitive
|
||||
# match.
|
||||
return re.sub(url_re, redacted, output, flags=re.IGNORECASE)
|
||||
|
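As a quick illustration of the redaction pattern used by ``_remove_sensitive_data`` above (a standalone sketch; the URL and credentials are made up):

.. code-block:: python

    import re

    url_re = '(https?)://.*@'
    redacted = r'\1://<redacted>@'
    cmd = 'git pull https://someuser:hunter2@example.com/repo.git'
    # The HTTP(S) credential portion is replaced before the command is logged.
    print(re.sub(url_re, redacted, cmd, flags=re.IGNORECASE))
    # git pull https://<redacted>@example.com/repo.git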
@ -493,7 +511,7 @@ def archive(cwd,
|
|||
return True
|
||||
|
||||
|
||||
def branch(cwd, name, opts='', user=None, ignore_retcode=False):
|
||||
def branch(cwd, name=None, opts='', user=None, ignore_retcode=False):
|
||||
'''
|
||||
Interface to `git-branch(1)`_
|
||||
|
||||
|
@ -501,7 +519,8 @@ def branch(cwd, name, opts='', user=None, ignore_retcode=False):
|
|||
The path to the git checkout
|
||||
|
||||
name
|
||||
Name of the branch on which to operate
|
||||
Name of the branch on which to operate. If not specified, the current
|
||||
branch will be assumed.
|
||||
|
||||
opts
|
||||
Any additional options to add to the command line, in a single string
|
||||
|
@ -546,7 +565,8 @@ def branch(cwd, name, opts='', user=None, ignore_retcode=False):
|
|||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'branch']
|
||||
command.extend(_format_opts(opts))
|
||||
command.append(name)
|
||||
if name is not None:
|
||||
command.append(name)
|
||||
_git_run(command, cwd=cwd, runas=user, ignore_retcode=ignore_retcode)
|
||||
return True
|
||||
|
||||
|
@ -2373,7 +2393,7 @@ def remote_get(cwd, remote='origin', user=None, ignore_retcode=False):
|
|||
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
CLI Example:
|
||||
CLI Examples:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
@ -2390,6 +2410,80 @@ def remote_get(cwd, remote='origin', user=None, ignore_retcode=False):
|
|||
return all_remotes[remote]
|
||||
|
||||
|
||||
def remote_refs(url,
|
||||
heads=False,
|
||||
tags=False,
|
||||
user=None,
|
||||
identity=None,
|
||||
https_user=None,
|
||||
https_pass=None,
|
||||
ignore_retcode=False):
|
||||
'''
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
Return the remote refs for the specified URL
|
||||
|
||||
url
|
||||
URL of the remote repository
|
||||
|
||||
heads : False
|
||||
Restrict output to heads. Can be combined with ``tags``.
|
||||
|
||||
tags : False
|
||||
Restrict output to tags. Can be combined with ``heads``.
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
|
||||
identity
|
||||
Path to a private key to use for ssh URLs
|
||||
|
||||
.. warning::
|
||||
|
||||
Key must be passphraseless to allow for non-interactive login. For
|
||||
greater security with passphraseless private keys, see the
|
||||
`sshd(8)`_ manpage for information on securing the keypair from the
|
||||
remote side in the ``authorized_keys`` file.
|
||||
|
||||
.. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT
|
||||
|
||||
https_user
|
||||
Set HTTP Basic Auth username. Only accepted for HTTPS URLs.
|
||||
|
||||
https_pass
|
||||
Set HTTP Basic Auth password. Only accepted for HTTPS URLs.
|
||||
|
||||
ignore_retcode : False
|
||||
If ``True``, do not log an error to the minion log if the git command
|
||||
returns a nonzero exit status.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt myminion git.remote_refs https://github.com/saltstack/salt.git
|
||||
'''
|
||||
command = ['git', 'ls-remote']
|
||||
if heads:
|
||||
command.append('--heads')
|
||||
if tags:
|
||||
command.append('--tags')
|
||||
command.append(_add_http_basic_auth(url, https_user, https_pass))
|
||||
output = _git_run(command,
|
||||
user=user,
|
||||
identity=identity,
|
||||
ignore_retcode=ignore_retcode)['stdout']
|
||||
ret = {}
|
||||
for line in salt.utils.itersplit(output, '\n'):
|
||||
try:
|
||||
sha1_hash, ref_name = line.split(None, 1)
|
||||
except ValueError:
|
||||
continue
|
||||
ret[ref_name] = sha1_hash
|
||||
return ret
|
||||
|
||||
|
||||
def remote_set(cwd,
|
||||
url,
|
||||
remote='origin',
|
||||
|
|
|
@ -416,25 +416,45 @@ def check(set=None, entry=None, family='ipv4'):
|
|||
if not entry:
|
||||
return 'Error: Entry needs to be specified'
|
||||
|
||||
if isinstance(entry, list):
|
||||
entries = entry
|
||||
else:
|
||||
if entry.find("-") != -1 and entry.count("-") == 1:
|
||||
start, end = entry.split("-")
|
||||
|
||||
entries = [str(ipaddress.ip_address(ip)) for ip in range(
|
||||
ipaddress.ip_address(start),
|
||||
ipaddress.ip_address(end) + 1
|
||||
)]
|
||||
elif entry.find("/") != -1 and entry.count("/") == 1:
|
||||
entries = [str(ip) for ip in ipaddress.ip_network(entry)]
|
||||
else:
|
||||
entries = [entry]
|
||||
|
||||
settype = _find_set_type(set)
|
||||
if not settype:
|
||||
return 'Error: Set {0} does not exist'.format(set)
|
||||
|
||||
if isinstance(entry, list):
|
||||
entries = entry
|
||||
else:
|
||||
if entry.find('-') != -1 and entry.count('-') == 1:
|
||||
start, end = entry.split('-')
|
||||
|
||||
if settype == 'hash:ip':
|
||||
entries = [str(ipaddress.ip_address(ip)) for ip in range(
|
||||
ipaddress.ip_address(start),
|
||||
ipaddress.ip_address(end) + 1
|
||||
)]
|
||||
|
||||
elif settype == 'hash:net':
|
||||
networks = ipaddress.summarize_address_range(ipaddress.ip_address(start),
|
||||
ipaddress.ip_address(end))
|
||||
entries = []
|
||||
for network in networks:
|
||||
entries.append(network.with_prefixlen)
|
||||
else:
|
||||
entries = [entry]
|
||||
|
||||
elif entry.find('/') != -1 and entry.count('/') == 1:
|
||||
if settype == 'hash:ip':
|
||||
entries = [str(ip) for ip in ipaddress.ip_network(entry)]
|
||||
elif settype == 'hash:net':
|
||||
_entries = [str(ip) for ip in ipaddress.ip_network(entry)]
|
||||
if len(_entries) == 1:
|
||||
entries = [_entries[0]]
|
||||
else:
|
||||
entries = [entry]
|
||||
else:
|
||||
entries = [entry]
|
||||
else:
|
||||
entries = [entry]
|
||||
|
||||
current_members = _find_set_members(set)
|
||||
for entry in entries:
|
||||
if entry not in current_members:
|
||||
|
|
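The ``hash:net`` branch above converts an arbitrary start-end range into a minimal list of CIDR networks via ``ipaddress.summarize_address_range``; a brief standalone illustration (addresses are made up):

.. code-block:: python

    import ipaddress

    start = ipaddress.ip_address(u'192.168.0.10')
    end = ipaddress.ip_address(u'192.168.0.25')
    # summarize_address_range() yields the fewest CIDR blocks covering the range,
    # which is what gets stored in a hash:net set.
    print([net.with_prefixlen for net in
           ipaddress.summarize_address_range(start, end)])
    # ['192.168.0.10/31', '192.168.0.12/30', '192.168.0.16/29', '192.168.0.24/31']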
salt/modules/smartos_legacy.py (new file, 420 lines)
@ -0,0 +1,420 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Module for managing VMs on SmartOS
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
import json
|
||||
|
||||
# Import Salt libs
|
||||
from salt.exceptions import CommandExecutionError
|
||||
import salt.utils
|
||||
import salt.utils.decorators as decorators
|
||||
import salt.ext.six as six
|
||||
try:
|
||||
from shlex import quote as _cmd_quote # pylint: disable=E0611
|
||||
except ImportError:
|
||||
from pipes import quote as _cmd_quote
|
||||
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'virt'
|
||||
|
||||
|
||||
@decorators.memoize
|
||||
def _check_vmadm():
|
||||
'''
|
||||
Looks to see if vmadm is present on the system
|
||||
'''
|
||||
return salt.utils.which('vmadm')
|
||||
|
||||
|
||||
def _check_dladm():
|
||||
'''
|
||||
Looks to see if dladm is present on the system
|
||||
'''
|
||||
return salt.utils.which('dladm')
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Provides virt on SmartOS
|
||||
'''
|
||||
if salt.utils.is_smartos_globalzone() and _check_vmadm():
|
||||
# Warn about deprecation
salt.utils.warn_until(
'Nitrogen',
'The \'virt\' module on SmartOS has been deprecated, '
'please use the smartos module or vmadm wrapper module.'
|
||||
)
|
||||
return __virtualname__
|
||||
return False
|
||||
|
||||
|
||||
def _exit_status(retcode):
|
||||
'''
|
||||
Translate exit status of vmadm
|
||||
'''
|
||||
ret = {0: 'Successful completion.',
|
||||
1: 'An error occurred.',
|
||||
2: 'Usage error.'}[retcode]
|
||||
return ret
|
||||
|
||||
|
||||
def _gen_zone_json(**kwargs):
|
||||
'''
|
||||
Generate the JSON for OS virtualization creation
|
||||
|
||||
Example layout (all keys are mandatory) :
|
||||
|
||||
{"brand": "joyent",
|
||||
"image_uuid": "9eac5c0c-a941-11e2-a7dc-57a6b041988f",
|
||||
"alias": "myname",
|
||||
"hostname": "www.domain.com",
|
||||
"max_physical_memory": 2048,
|
||||
"quota": 10,
|
||||
"nics": [
|
||||
{
|
||||
"nic_tag": "admin",
|
||||
"ip": "192.168.0.1",
|
||||
"netmask": "255.255.255.0",
|
||||
"gateway": "192.168.0.254"
|
||||
}
|
||||
]}
|
||||
'''
|
||||
ret = {}
|
||||
nics = {}
|
||||
check_args = (
|
||||
'image_uuid', 'alias', 'hostname',
|
||||
'max_physical_memory', 'quota', 'nic_tag',
|
||||
'ip', 'netmask', 'gateway')
|
||||
nics_args = ('nic_tag', 'ip', 'netmask', 'gateway')
|
||||
# Lazy check of arguments
|
||||
if not all(key in kwargs for key in check_args):
|
||||
raise CommandExecutionError('Missing arguments for JSON generation')
|
||||
# This one is mandatory for OS virt
|
||||
ret.update(brand='joyent')
|
||||
# Populate JSON without NIC information
|
||||
ret.update((key, kwargs[key])
|
||||
for key in check_args
|
||||
if key in kwargs and key not in nics_args)
|
||||
# NICs are defined in a subdict
|
||||
nics.update((key, kwargs[key])
|
||||
for key in nics_args
|
||||
if key in kwargs)
|
||||
ret.update(nics=[nics])
|
||||
|
||||
return json.dumps(ret)
|
||||
|
||||
|
||||
def init(**kwargs):
|
||||
'''
|
||||
Initialize a new VM
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.init image_uuid='...' alias='...' [...]
|
||||
'''
|
||||
ret = {}
|
||||
vmadm = _check_vmadm()
|
||||
check_zone_args = (
|
||||
'image_uuid', 'alias', 'hostname',
|
||||
'max_physical_memory', 'quota', 'nic_tag',
|
||||
'ip', 'netmask', 'gateway')
|
||||
check_kvm_args = ('to_be_implemented')
|
||||
# check routines for mandatory arguments
|
||||
# Zones
|
||||
if all(key in kwargs for key in check_zone_args):
|
||||
ret = _gen_zone_json(**kwargs)
|
||||
# validation first
|
||||
cmd = 'echo {0} | {1} validate create'.format(_cmd_quote(ret), _cmd_quote(vmadm))
|
||||
res = __salt__['cmd.run_all'](cmd, python_shell=True)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
return CommandExecutionError(_exit_status(retcode))
|
||||
# if validation succeeded, proceed to the VM creation
|
||||
cmd = 'echo {0} | {1} create'.format(_cmd_quote(ret), _cmd_quote(vmadm))
|
||||
res = __salt__['cmd.run_all'](cmd, python_shell=True)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
return CommandExecutionError(_exit_status(retcode))
|
||||
return True
|
||||
# KVM
|
||||
elif all(key in kwargs for key in check_kvm_args):
|
||||
raise CommandExecutionError('KVM is not yet implemented')
|
||||
else:
|
||||
raise CommandExecutionError('Missing mandatory arguments')
|
||||
|
||||
|
||||
def list_vms():
|
||||
'''
|
||||
Return a list of virtual machine names on the minion
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.list_vms
|
||||
'''
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} list'.format(vmadm)
|
||||
vms = []
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
for key, uuid in six.iteritems(res):
|
||||
if key == "stdout":
|
||||
vms.append(uuid)
|
||||
return vms
|
||||
|
||||
|
||||
def list_active_vms():
|
||||
'''
|
||||
Return a list of uuids for active virtual machine on the minion
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.list_active_vms
|
||||
'''
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} lookup state=running'.format(vmadm)
|
||||
vms = []
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
for key, uuid in six.iteritems(res):
|
||||
if key == "stdout":
|
||||
vms.append(uuid)
|
||||
return vms
|
||||
|
||||
|
||||
def list_inactive_vms():
|
||||
'''
|
||||
Return a list of uuids for inactive virtual machine on the minion
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.list_inactive_vms
|
||||
'''
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} lookup state=stopped'.format(vmadm)
|
||||
vms = []
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
for key, uuid in six.iteritems(res):
|
||||
if key == "stdout":
|
||||
vms.append(uuid)
|
||||
return vms
|
||||
|
||||
|
||||
def vm_info(uuid=None):
|
||||
'''
|
||||
Return a dict with information about the specified VM on this CN
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.vm_info <uuid>
|
||||
'''
|
||||
info = {}
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} get {1}'.format(vmadm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
info = res['stdout']
|
||||
return info
|
||||
|
||||
|
||||
def start(uuid=None):
|
||||
'''
|
||||
Start a defined domain
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.start <uuid>
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
if uuid in list_active_vms():
|
||||
raise CommandExecutionError('The specified vm is already running')
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} start {1}'.format(vmadm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
if uuid in list_active_vms():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def shutdown(uuid=None):
|
||||
'''
|
||||
Send a soft shutdown signal to the named vm
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.shutdown <uuid>
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
if uuid in list_inactive_vms():
|
||||
raise CommandExecutionError('The specified vm is already stopped')
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} stop {1}'.format(vmadm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
if uuid in list_inactive_vms():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def reboot(uuid=None):
|
||||
'''
|
||||
Reboot a domain via ACPI request
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.reboot <uuid>
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
if uuid in list_inactive_vms():
|
||||
raise CommandExecutionError('The specified vm is stopped')
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} reboot {1}'.format(vmadm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
if uuid in list_active_vms():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def destroy(uuid=None):
|
||||
'''
|
||||
Hard power down the virtual machine; this is equivalent to pulling the power
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.destroy <uuid>
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} delete {1}'.format(vmadm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
return True
|
||||
|
||||
|
||||
def vm_virt_type(uuid=None):
|
||||
'''
|
||||
Return VM virtualization type : OS or KVM
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.vm_virt_type <uuid>
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
vmadm = _check_vmadm()
|
||||
cmd = '{0} list -p -o type uuid={1}'.format(vmadm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
retcode = res['retcode']
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
ret = res['stdout']
|
||||
if ret != '':
|
||||
return ret
|
||||
raise CommandExecutionError('We can\'t determine the type of this VM')
|
||||
|
||||
|
||||
def setmem(uuid, memory):
|
||||
'''
|
||||
Change the amount of memory allocated to VM.
|
||||
<memory> is to be specified in MB.
|
||||
|
||||
Note for KVM : this would require a restart of the VM.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.setmem <uuid> 512
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
# We want to determine the nature of the VM
|
||||
vmtype = vm_virt_type(uuid)
|
||||
vmadm = _check_vmadm()
|
||||
warning = []
|
||||
if vmtype == 'OS':
|
||||
cmd = '{0} update {1} max_physical_memory={2}'.format(vmadm, uuid, memory)
|
||||
elif vmtype == 'KVM':
|
||||
cmd = '{0} update {1} ram={2}'.format(vmadm, uuid, memory)
|
||||
warning = 'Done, but please note this will require a restart of the VM'
|
||||
retcode = __salt__['cmd.retcode'](cmd)
|
||||
if retcode != 0:
|
||||
raise CommandExecutionError(_exit_status(retcode))
|
||||
if not warning:
|
||||
return True
|
||||
return warning
|
||||
|
||||
|
||||
def get_macs(uuid=None):
|
||||
'''
|
||||
Return a list of MAC addresses from the named VM
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' virt.get_macs <uuid>
|
||||
'''
|
||||
if not uuid:
|
||||
raise CommandExecutionError('UUID parameter is mandatory')
|
||||
dladm = _check_dladm()
|
||||
cmd = '{0} show-vnic -o MACADDRESS -p -z {1}'.format(dladm, uuid)
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
ret = res['stdout']
|
||||
if ret != '':
|
||||
return ret
|
||||
raise CommandExecutionError('We can\'t find the MAC address of this VM')
|
||||
|
||||
|
||||
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
@@ -45,7 +45,10 @@ def get_dns_servers(interface='Local Area Connection'):
     for iface in c.Win32_NetworkAdapter(NetEnabled=True):
         if interface == iface.NetConnectionID:
             iface_config = c.Win32_NetworkAdapterConfiguration(Index=iface.Index).pop()
-            return list(iface_config.DNSServerSearchOrder)
+            try:
+                return list(iface_config.DNSServerSearchOrder)
+            except TypeError:
+                return []
     log.debug('Interface "{0}" not found'.format(interface))
     return False
 
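The guard above matters because WMI can report ``DNSServerSearchOrder`` as ``None`` for an adapter with no DNS servers configured (an assumption consistent with the ``TypeError`` handling added here), and ``list(None)`` raises ``TypeError``:

.. code-block:: python

    try:
        list(None)          # what the old code effectively did for such an adapter
    except TypeError as exc:
        print(exc)          # 'NoneType' object is not iterable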
@ -19,10 +19,13 @@ import salt.utils
|
|||
import salt.loader
|
||||
import salt.template
|
||||
from salt.exceptions import CommandExecutionError, SaltRenderError
|
||||
# pylint: disable=unused-import
|
||||
from salt.runners.winrepo import (
|
||||
genrepo as _genrepo,
|
||||
update_git_repos as _update_git_repos
|
||||
update_git_repos as _update_git_repos,
|
||||
PER_REMOTE_PARAMS
|
||||
)
|
||||
# pylint: enable=unused-import
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
|
|
@ -294,7 +294,8 @@ def load_states():
|
|||
__opts__['grains'] = salt.loader.grains(__opts__)
|
||||
__opts__['pillar'] = __pillar__
|
||||
lazy_funcs = salt.loader.minion_mods(__opts__)
|
||||
lazy_states = salt.loader.states(__opts__, lazy_funcs)
|
||||
lazy_utils = salt.loader.utils(__opts__)
|
||||
lazy_states = salt.loader.states(__opts__, lazy_funcs, lazy_utils)
|
||||
|
||||
# TODO: some way to lazily do this? This requires loading *all* state modules
|
||||
for key, func in six.iteritems(lazy_states):
|
||||
|
|
|
@ -173,75 +173,81 @@ def update_git_repos(opts=None, masterless=False):
|
|||
else:
|
||||
winrepo_remotes = opts['winrepo_remotes']
|
||||
|
||||
if not any((salt.utils.gitfs.HAS_GITPYTHON, salt.utils.gitfs.HAS_PYGIT2)):
|
||||
# Use legacy code
|
||||
if not salt.utils.is_windows():
|
||||
# Don't warn on Windows, because Windows can't do cool things like
|
||||
# use pygit2. It has to fall back to git.latest.
|
||||
salt.utils.warn_until(
|
||||
'Nitrogen',
|
||||
'winrepo git support now requires either GitPython or pygit2. '
|
||||
'Please install either GitPython >= {0} (or pygit2 >= {1} with '
|
||||
'libgit2 >= {2}), clear out the winrepo_dir ({3}), and '
|
||||
'restart the salt-master service.'.format(
|
||||
salt.utils.gitfs.GITPYTHON_MINVER,
|
||||
salt.utils.gitfs.PYGIT2_MINVER,
|
||||
salt.utils.gitfs.LIBGIT2_MINVER,
|
||||
winrepo_dir
|
||||
)
|
||||
)
|
||||
ret = {}
|
||||
for remote_info in winrepo_remotes:
|
||||
if '/' in remote_info:
|
||||
targetname = remote_info.split('/')[-1]
|
||||
else:
|
||||
targetname = remote_info
|
||||
rev = None
|
||||
# If a revision is specified, use it.
|
||||
try:
|
||||
rev, remote_url = remote_info.strip().split()
|
||||
except ValueError:
|
||||
remote_url = remote_info
|
||||
gittarget = os.path.join(winrepo_dir, targetname).replace('.', '_')
|
||||
if masterless:
|
||||
result = __salt__['state.single']('git.latest',
|
||||
name=remote_url,
|
||||
rev=rev,
|
||||
branch='winrepo',
|
||||
target=gittarget,
|
||||
force_checkout=True,
|
||||
force_reset=True)
|
||||
if isinstance(result, list):
|
||||
# Errors were detected
|
||||
raise CommandExecutionError(
|
||||
'Failed to update winrepo_remotes: {0}'.format(
|
||||
'\n'.join(result)
|
||||
)
|
||||
winrepo_cfg = [(winrepo_remotes, winrepo_dir),
|
||||
(opts['winrepo_remotes_ng'], opts['winrepo_dir_ng'])]
|
||||
|
||||
ret = {}
|
||||
for remotes, base_dir in winrepo_cfg:
|
||||
if not any((salt.utils.gitfs.HAS_GITPYTHON, salt.utils.gitfs.HAS_PYGIT2)):
|
||||
# Use legacy code
|
||||
if not salt.utils.is_windows():
|
||||
# Don't warn on Windows, because Windows can't do cool things like
|
||||
# use pygit2. It has to fall back to git.latest.
|
||||
salt.utils.warn_until(
|
||||
'Nitrogen',
|
||||
'winrepo git support now requires either GitPython or pygit2. '
|
||||
'Please install either GitPython >= {0} (or pygit2 >= {1} with '
|
||||
'libgit2 >= {2}), clear out {3}, and restart the salt-master '
|
||||
'service.'.format(
|
||||
salt.utils.gitfs.GITPYTHON_MINVER,
|
||||
salt.utils.gitfs.PYGIT2_MINVER,
|
||||
salt.utils.gitfs.LIBGIT2_MINVER,
|
||||
base_dir
|
||||
)
|
||||
if 'name' not in result:
|
||||
# Highstate output dict, the results are actually nested
|
||||
# one level down.
|
||||
key = next(iter(result))
|
||||
result = result[key]
|
||||
else:
|
||||
mminion = salt.minion.MasterMinion(opts)
|
||||
result = mminion.states['git.latest'](remote_url,
|
||||
)
|
||||
winrepo_result = {}
|
||||
for remote_info in remotes:
|
||||
if '/' in remote_info:
|
||||
targetname = remote_info.split('/')[-1]
|
||||
else:
|
||||
targetname = remote_info
|
||||
rev = 'HEAD'
|
||||
# If a revision is specified, use it.
|
||||
try:
|
||||
rev, remote_url = remote_info.strip().split()
|
||||
except ValueError:
|
||||
remote_url = remote_info
|
||||
gittarget = os.path.join(base_dir, targetname).replace('.', '_')
|
||||
if masterless:
|
||||
result = __salt__['state.single']('git.latest',
|
||||
name=remote_url,
|
||||
rev=rev,
|
||||
branch='winrepo',
|
||||
target=gittarget,
|
||||
force_checkout=True,
|
||||
force_reset=True)
|
||||
ret[result['name']] = result['result']
|
||||
return ret
|
||||
else:
|
||||
# New winrepo code utilizing salt.utils.gitfs
|
||||
try:
|
||||
winrepo = salt.utils.gitfs.WinRepo(opts)
|
||||
winrepo.init_remotes(winrepo_remotes, PER_REMOTE_PARAMS)
|
||||
winrepo.fetch_remotes()
|
||||
winrepo.checkout()
|
||||
except Exception as exc:
|
||||
msg = 'Failed to update winrepo_remotes: {0}'.format(exc)
|
||||
log.error(msg, exc_info_on_loglevel=logging.DEBUG)
|
||||
return msg
|
||||
return winrepo.winrepo_dirs
|
||||
if isinstance(result, list):
|
||||
# Errors were detected
|
||||
raise CommandExecutionError(
|
||||
'Failed to update winrepo remotes: {0}'.format(
|
||||
'\n'.join(result)
|
||||
)
|
||||
)
|
||||
if 'name' not in result:
|
||||
# Highstate output dict, the results are actually nested
|
||||
# one level down.
|
||||
key = next(iter(result))
|
||||
result = result[key]
|
||||
else:
|
||||
mminion = salt.minion.MasterMinion(opts)
|
||||
result = mminion.states['git.latest'](remote_url,
|
||||
rev=rev,
|
||||
branch='winrepo',
|
||||
target=gittarget,
|
||||
force_checkout=True,
|
||||
force_reset=True)
|
||||
winrepo_result[result['name']] = result['result']
|
||||
ret.update(winrepo_result)
|
||||
else:
|
||||
# New winrepo code utilizing salt.utils.gitfs
|
||||
try:
|
||||
winrepo = salt.utils.gitfs.WinRepo(opts, base_dir)
|
||||
winrepo.init_remotes(remotes, PER_REMOTE_PARAMS)
|
||||
winrepo.fetch_remotes()
|
||||
winrepo.checkout()
|
||||
except Exception as exc:
|
||||
msg = 'Failed to update winrepo_remotes: {0}'.format(exc)
|
||||
log.error(msg, exc_info_on_loglevel=logging.DEBUG)
|
||||
return msg
|
||||
ret.update(winrepo.winrepo_dirs)
|
||||
return ret
|
||||
|
|
|
@ -81,7 +81,7 @@ class SPMClient(object):
|
|||
self.pkgdb = salt.loader.pkgdb(self.opts)
|
||||
self.db_conn = self.pkgdb[db_fun]()
|
||||
|
||||
self.files_prov = opts.get('spm_files_provider', 'roots')
|
||||
self.files_prov = opts.get('spm_files_provider', 'local')
|
||||
files_fun = '{0}.init'.format(self.files_prov)
|
||||
|
||||
self.pkgfiles = salt.loader.pkgfiles(self.opts)
|
||||
|
@ -186,7 +186,7 @@ class SPMClient(object):
|
|||
pkg_files = formula_tar.getmembers()
|
||||
# First pass: check for files that already exist
|
||||
existing_files = self.pkgfiles['{0}.check_existing'.format(self.files_prov)](
|
||||
name, pkg_files
|
||||
name, pkg_files, formula_def
|
||||
)
|
||||
|
||||
if existing_files and not self.opts['force']:
|
||||
|
@ -220,15 +220,16 @@ class SPMClient(object):
|
|||
digest = file_hash.hexdigest()
|
||||
|
||||
out_path = self.pkgfiles['{0}.install_file'.format(self.files_prov)](
|
||||
name, formula_tar, member, self.files_conn
|
||||
)
|
||||
self.pkgdb['{0}.register_file'.format(self.db_prov)](
|
||||
name,
|
||||
member,
|
||||
out_path,
|
||||
digest,
|
||||
self.db_conn
|
||||
name, formula_tar, member, formula_def, self.files_conn
|
||||
)
|
||||
if out_path is not False:
|
||||
self.pkgdb['{0}.register_file'.format(self.db_prov)](
|
||||
name,
|
||||
member,
|
||||
out_path,
|
||||
digest,
|
||||
self.db_conn
|
||||
)
|
||||
|
||||
formula_tar.close()
|
||||
|
||||
|
@ -570,6 +571,8 @@ class SPMClient(object):
|
|||
if not os.path.exists(self.opts['spm_build_dir']):
|
||||
os.mkdir(self.opts['spm_build_dir'])
|
||||
|
||||
self.formula_conf = formula_conf
|
||||
|
||||
formula_tar = tarfile.open(out_path, 'w:bz2')
|
||||
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
|
||||
formula_tar.close()
|
||||
|
@ -581,8 +584,9 @@ class SPMClient(object):
|
|||
Exclude based on opts
|
||||
'''
|
||||
for item in self.opts['spm_build_exclude']:
|
||||
exclude_name = '{0}/{1}'.format(self.abspath, item)
|
||||
if member.name.startswith(exclude_name):
|
||||
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
|
||||
return None
|
||||
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
|
||||
return None
|
||||
return member
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
This module allows SPM to use the local filesystem (``file_roots``) to install
|
||||
files for SPM.
|
||||
This module allows SPM to use the local filesystem to install files for SPM.
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
'''
|
||||
|
@ -20,18 +19,20 @@ def init(**kwargs):
|
|||
'''
|
||||
Initialize the directories for the files
|
||||
'''
|
||||
roots_path = __opts__['file_roots']['base'][0]
|
||||
pillar_path = __opts__['pillar_roots']['base'][0]
|
||||
for dir_ in (roots_path, pillar_path):
|
||||
formula_path = __opts__['formula_path']
|
||||
pillar_path = __opts__['pillar_path']
|
||||
reactor_path = __opts__['reactor_path']
|
||||
for dir_ in (formula_path, pillar_path, reactor_path):
|
||||
if not os.path.exists(dir_):
|
||||
os.makedirs(dir_)
|
||||
return {
|
||||
'roots_path': roots_path,
|
||||
'formula_path': formula_path,
|
||||
'pillar_path': pillar_path,
|
||||
'reactor_path': reactor_path,
|
||||
}
|
||||
|
||||
|
||||
def check_existing(package, pkg_files, conn=None):
|
||||
def check_existing(package, pkg_files, formula_def, conn=None):
|
||||
'''
|
||||
Check the filesystem for existing files
|
||||
'''
|
||||
|
@ -42,20 +43,27 @@ def check_existing(package, pkg_files, conn=None):
|
|||
for member in pkg_files:
|
||||
if member.isdir():
|
||||
continue
|
||||
|
||||
tld = formula_def.get('top_level_dir', package)
|
||||
new_name = member.name.replace('{0}/'.format(package), '')
|
||||
if not new_name.startswith(tld):
|
||||
continue
|
||||
|
||||
if member.name.startswith('{0}/_'.format(package)):
|
||||
# Module files are distributed via _modules, _states, etc
|
||||
new_name = member.name.replace('{0}/'.format(package), '')
|
||||
out_file = os.path.join(conn['roots_path'], new_name)
|
||||
out_file = os.path.join(conn['formula_path'], new_name)
|
||||
elif member.name == '{0}/pillar.example'.format(package):
|
||||
# Pillars are automatically put in the pillar_roots
|
||||
# Pillars are automatically put in the pillar_path
|
||||
new_name = '{0}.sls.orig'.format(package)
|
||||
out_file = os.path.join(conn['pillar_path'], new_name)
|
||||
elif package.endswith('-conf'):
|
||||
# Module files are distributed via _modules, _states, etc
|
||||
new_name = member.name.replace('{0}/'.format(package), '')
|
||||
# Configuration files go into /etc/salt/
|
||||
out_file = os.path.join(salt.syspaths.CONFIG_DIR, new_name)
|
||||
elif package.endswith('-reactor'):
|
||||
# Reactor files go into /srv/reactor/
|
||||
out_file = os.path.join(conn['reactor_path'], member.name)
|
||||
else:
|
||||
out_file = os.path.join(conn['roots_path'], member.name)
|
||||
out_file = os.path.join(conn['formula_path'], member.name)
|
||||
|
||||
if os.path.exists(out_file):
|
||||
existing_files.append(out_file)
|
||||
|
@ -65,26 +73,38 @@ def check_existing(package, pkg_files, conn=None):
|
|||
return existing_files
|
||||
|
||||
|
||||
def install_file(package, formula_tar, member, conn=None):
|
||||
def install_file(package, formula_tar, member, formula_def, conn=None):
|
||||
'''
|
||||
Install a single file to the file system
|
||||
'''
|
||||
if member.name == package:
|
||||
return False
|
||||
|
||||
if conn is None:
|
||||
conn = init()
|
||||
|
||||
out_path = conn['roots_path']
|
||||
out_path = conn['formula_path']
|
||||
|
||||
tld = formula_def.get('top_level_dir', package)
|
||||
new_name = member.name.replace('{0}/'.format(package), '', 1)
|
||||
if not new_name.startswith(tld) and not new_name.startswith('_') and not new_name.startswith('pillar.example'):
|
||||
log.debug('{0} not in top level directory, not installing'.format(new_name))
|
||||
return False
|
||||
|
||||
if member.name.startswith('{0}/_'.format(package)):
|
||||
# Module files are distributed via _modules, _states, etc
|
||||
member.name = member.name.replace('{0}/'.format(package), '')
|
||||
elif member.name == '{0}/pillar.example'.format(package):
|
||||
# Pillars are automatically put in the pillar_roots
|
||||
# Pillars are automatically put in the pillar_path
|
||||
member.name = '{0}.sls.orig'.format(package)
|
||||
out_path = conn['pillar_path']
|
||||
elif package.endswith('-conf'):
|
||||
# Module files are distributed via _modules, _states, etc
|
||||
# Configuration files go into /etc/salt/
|
||||
member.name = member.name.replace('{0}/'.format(package), '')
|
||||
out_path = salt.syspaths.CONFIG_DIR
|
||||
elif package.endswith('-reactor'):
|
||||
# Reactor files go into /srv/reactor/
|
||||
out_path = __opts__['reactor_path']
|
||||
|
||||
log.debug('Installing package file {0} to {1}'.format(member.name, out_path))
|
||||
formula_tar.extract(member, out_path)
|
@@ -769,7 +769,7 @@ class State(object):
                     func[func.rindex('.'):]
                 )
                 self.functions[f_key] = funcs[func]
-        self.states = salt.loader.states(self.opts, self.functions)
+        self.states = salt.loader.states(self.opts, self.functions, self.utils)
         self.rend = salt.loader.render(self.opts, self.functions, states=self.states)

     def module_refresh(self):
@@ -3231,7 +3231,8 @@ class MasterState(State):
         )
         # Load the states, but they should not be used in this class apart
         # from inspection
-        self.states = salt.loader.states(self.opts, self.functions)
+        self.utils = salt.loader.utils(self.opts)
+        self.states = salt.loader.states(self.opts, self.functions, self.utils)
         self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
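Both hunks above adapt the State and MasterState classes to a salt.loader.states() signature that now takes the utils dictionary as a third argument. A minimal sketch of the new call chain, using only public loader entry points; the config path is illustrative and this is not what State/MasterState actually build internally.

# Hedged sketch: the new three-argument states() call, assuming stock opts.
import salt.config
import salt.loader

opts = salt.config.minion_config('/etc/salt/minion')  # path is illustrative
utils = salt.loader.utils(opts)
functions = salt.loader.minion_mods(opts, utils=utils)
# states() now also receives utils, so state modules can reach utils helpers
states = salt.loader.states(opts, functions, utils)
rend = salt.loader.render(opts, functions, states=states)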
1089
salt/states/git.py
File diff suppressed because it is too large
Load diff
@@ -221,8 +221,8 @@ def present(name, entry=None, family='ipv4', **kwargs):
             ret['changes'] = {'locale': name}
             ret['result'] = True
             ret['comment'] += 'entry {0} added to set {1} for family {2}\n'.format(
-                kwargs['set_name'],
                 _entry,
+                kwargs['set_name'],
                 family)
         else:
             ret['result'] = False
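The only substantive change in this hunk is the argument order: {0} is the entry and {1} the set name, which the old code had reversed. A one-line illustration with invented values:

# Hedged example values; shows the corrected placeholder order.
msg = 'entry {0} added to set {1} for family {2}\n'.format(
    '192.168.0.3', 'blacklist', 'ipv4')
print(msg)  # entry 192.168.0.3 added to set blacklist for family ipv4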
@@ -34,7 +34,8 @@ except ImportError:
     __generated_syspaths = imp.new_module('salt._syspaths')
     for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR',
                 'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'BASE_PILLAR_ROOTS_DIR',
-                'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR'):
+                'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR',
+                'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH'):
         setattr(__generated_syspaths, key, None)
@@ -104,6 +105,18 @@ PIDFILE_DIR = __generated_syspaths.PIDFILE_DIR
 if PIDFILE_DIR is None:
     PIDFILE_DIR = os.path.join(ROOT_DIR, 'var', 'run')

+SPM_FORMULA_PATH = __generated_syspaths.SPM_FORMULA_PATH
+if SPM_FORMULA_PATH is None:
+    SPM_FORMULA_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'salt')
+
+SPM_PILLAR_PATH = __generated_syspaths.SPM_PILLAR_PATH
+if SPM_PILLAR_PATH is None:
+    SPM_PILLAR_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'pillar')
+
+SPM_REACTOR_PATH = __generated_syspaths.SPM_REACTOR_PATH
+if SPM_REACTOR_PATH is None:
+    SPM_REACTOR_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'reactor')
+
 __all__ = [
     'ROOT_DIR',
@@ -118,5 +131,8 @@ __all__ = [
     'PIDFILE_DIR',
     'INSTALL_DIR',
     'CLOUD_DIR',
-    'BOOTSTRAP'
+    'BOOTSTRAP',
+    'SPM_FORMULA_PATH',
+    'SPM_PILLAR_PATH',
+    'SPM_REACTOR_PATH'
 ]
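The pattern for the three new SPM paths is the same as for the existing directories: a value baked into the generated salt._syspaths module wins, otherwise a default under SRV_ROOT_DIR is computed. A stripped-down illustration of that fallback; the dict stands in for the generated module and SRV_ROOT_DIR is hard-coded here.

# Hedged sketch of the install-time-override-or-default pattern shown above.
import os

SRV_ROOT_DIR = '/srv'
_generated = {'SPM_FORMULA_PATH': None}  # stand-in for salt._syspaths

SPM_FORMULA_PATH = _generated['SPM_FORMULA_PATH']
if SPM_FORMULA_PATH is None:
    SPM_FORMULA_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'salt')

print(SPM_FORMULA_PATH)  # /srv/spm/salt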
@@ -2258,19 +2258,10 @@ class WinRepo(GitBase):
     '''
     Functionality specific to the winrepo runner
     '''
-    def __init__(self, opts):
+    def __init__(self, opts, winrepo_dir):
         self.role = 'winrepo'
         # Dulwich has no function to check out a branch/tag, so this will be
         # limited to GitPython and Pygit2 for the forseeable future.
-        if 'win_repo' in opts:
-            salt.utils.warn_until(
-                'Nitrogen',
-                'The \'win_repo\' config option is deprecated, please use '
-                '\'winrepo_dir\' instead.'
-            )
-            winrepo_dir = opts['win_repo']
-        else:
-            winrepo_dir = opts['winrepo_dir']
         GitBase.__init__(self,
                          opts,
                          valid_providers=('gitpython', 'pygit2'),
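WinRepo.__init__ no longer resolves the repository directory itself; the caller now passes winrepo_dir in, so the win_repo/winrepo_dir deprecation handling presumably lives with the caller. A rough sketch of what that caller-side resolution might look like; the plain dict opts, the example value, and the commented-out construction are assumptions, not the actual runner code.

# Hedged sketch: how a caller might pick the directory before building WinRepo.
def resolve_winrepo_dir(opts):
    if 'win_repo' in opts:
        # legacy option name; the real code would also emit a deprecation warning
        return opts['win_repo']
    return opts['winrepo_dir']


opts = {'winrepo_dir': '/srv/salt/win/repo'}  # illustrative value
winrepo_dir = resolve_winrepo_dir(opts)
# winrepo = WinRepo(opts, winrepo_dir)        # new two-argument constructor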
@@ -20,11 +20,9 @@ except ImportError:
 import salt.utils
 import salt.utils.aws
 import salt.utils.xmlutil as xml
-import salt.utils.iam as iam
 from salt._compat import ElementTree as ET

 log = logging.getLogger(__name__)

-DEFAULT_LOCATION = 'us-east-1'


 def query(key, keyid, method='GET', params=None, headers=None,
@@ -92,11 +90,6 @@ def query(key, keyid, method='GET', params=None, headers=None,
         key = salt.utils.aws.IROLE_CODE
         keyid = salt.utils.aws.IROLE_CODE

-    if not location:
-        location = iam.get_iam_region()
-    if not location:
-        location = DEFAULT_LOCATION
-
     if kms_keyid is not None and method in ('PUT', 'POST'):
         headers['x-amz-server-side-encryption'] = 'aws:kms'
         headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid
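These two hunks drop the iam-based region fallback and the DEFAULT_LOCATION constant from the S3 utility, so the location is presumably resolved before query() runs. For reference, the removed behaviour amounted to the small fallback chain below, reproduced as a standalone sketch with the IAM lookup replaced by a plain argument.

# Hedged reconstruction of the deleted fallback, for documentation only.
def old_location_fallback(location, iam_region=None):
    if not location:
        location = iam_region      # formerly iam.get_iam_region()
    if not location:
        location = 'us-east-1'     # formerly DEFAULT_LOCATION
    return location


assert old_location_fallback(None) == 'us-east-1'
assert old_location_fallback('eu-west-1') == 'eu-west-1'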
@@ -554,6 +554,15 @@ def dependency_information(include_salt_cloud=False):
         ('Tornado', 'tornado', 'version'),
         ('timelib', 'timelib', 'version'),
         ('dateutil', 'dateutil', '__version__'),
+        ('pygit2', 'pygit2', '__version__'),
+        ('smmap', 'smmap', '__version__'),
+        ('cffi', 'cffi', '__version__'),
+        ('pycparser', 'pycparser', '__version__'),
+        ('gitdb', 'gitdb', '__version__'),
+        ('gitpython', 'gitpython', '__version__'),
+        ('python-gnupg', 'python-gnupg', '__version__'),
+        ('mysql-python', 'mysql-python', '__version__'),
+        ('cherrypy', 'cherrypy', '__version__'),
     ]

     if include_salt_cloud:
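Each new entry is a (display name, importable module, version attribute) triple. A simplified sketch of how such a triple can be resolved into a version string; the real dependency_information() has more fallbacks than this, and the lookup() helper is invented for illustration.

# Hedged sketch; import failures are reported rather than raised.
import importlib


def lookup(name, modname, attr):
    try:
        mod = importlib.import_module(modname)
        return name, getattr(mod, attr, 'unknown')
    except ImportError:
        return name, 'Not Installed'


print('{0}: {1}'.format(*lookup('pygit2', 'pygit2', '__version__')))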
15
setup.py
@@ -205,6 +205,9 @@ class GenerateSaltSyspaths(Command):
                 base_master_roots_dir=self.distribution.salt_base_master_roots_dir,
                 logs_dir=self.distribution.salt_logs_dir,
                 pidfile_dir=self.distribution.salt_pidfile_dir,
+                spm_formula_path=self.distribution.salt_spm_formula_dir,
+                spm_pillar_path=self.distribution.salt_spm_pillar_dir,
+                spm_reactor_path=self.distribution.salt_spm_reactor_dir,
             )
         )
@@ -620,6 +623,9 @@ BASE_PILLAR_ROOTS_DIR = \'{base_pillar_roots_dir}\'
 BASE_MASTER_ROOTS_DIR = \'{base_master_roots_dir}\'
 LOGS_DIR = \'{logs_dir}\'
 PIDFILE_DIR = \'{pidfile_dir}\'
+SPM_FORMULA_PATH = \'{spm_formula_path}\'
+SPM_PILLAR_PATH = \'{spm_pillar_path}\'
+SPM_REACTOR_PATH = \'{spm_reactor_path}\'
 '''
@@ -823,6 +829,12 @@ class SaltDistribution(distutils.dist.Distribution):
          'Salt\'s pre-configured logs directory'),
         ('salt-pidfile-dir=', None,
          'Salt\'s pre-configured pidfiles directory'),
+        ('salt-spm-formula-dir=', None,
+         'Salt\'s pre-configured SPM formulas directory'),
+        ('salt-spm-pillar-dir=', None,
+         'Salt\'s pre-configured SPM pillar directory'),
+        ('salt-spm-reactor-dir=', None,
+         'Salt\'s pre-configured SPM reactor directory'),
     ]

     def __init__(self, attrs=None):
@@ -842,6 +854,9 @@ class SaltDistribution(distutils.dist.Distribution):
         self.salt_base_master_roots_dir = None
         self.salt_logs_dir = None
         self.salt_pidfile_dir = None
+        self.salt_spm_formula_dir = None
+        self.salt_spm_pillar_dir = None
+        self.salt_spm_reactor_dir = None

         self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
         self.salt_version = __version__  # pylint: disable=undefined-variable
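Taken together, the setup.py hunks add three build-time options (--salt-spm-formula-dir and friends), default them to None on the distribution object, and feed them into the template used to generate salt/_syspaths.py. A miniature version of that template expansion; the template text and the values are illustrative, not the full generated module.

# Hedged mini-version of the syspaths template expansion shown above.
TEMPLATE = '''\
SPM_FORMULA_PATH = '{spm_formula_path}'
SPM_PILLAR_PATH = '{spm_pillar_path}'
SPM_REACTOR_PATH = '{spm_reactor_path}'
'''

print(TEMPLATE.format(
    spm_formula_path='/srv/spm/salt',
    spm_pillar_path='/srv/spm/pillar',
    spm_reactor_path='/srv/spm/reactor',
))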
@@ -130,7 +130,7 @@ class LoaderGlobalsTest(integration.ModuleCase):
         - __grains__
         - __context__
         '''
-        self._verify_globals(salt.loader.states(self.master_opts, {}))
+        self._verify_globals(salt.loader.states(self.master_opts, {}, {}))

     def test_renderers(self):
         '''
@@ -185,13 +185,13 @@ comment support')
         self.assertEqual(ipset.check('set', 'entry'),
                          'Error: Set set does not exist')

-        with patch.object(ipset, '_find_set_type', return_value=True):
+        with patch.object(ipset, '_find_set_type', return_value='hash:ip'):
             with patch.object(ipset, '_find_set_members',
                               side_effect=['entry', '',
-                                           ["192.168.0.4", "192.168.0.5"],
-                                           ["192.168.0.3"], ["192.168.0.6"],
-                                           ["192.168.0.4", "192.168.0.5"],
-                                           ["192.168.0.3"], ["192.168.0.6"],
+                                           ['192.168.0.4', '192.168.0.5'],
+                                           ['192.168.0.3'], ['192.168.0.6'],
+                                           ['192.168.0.4', '192.168.0.5'],
+                                           ['192.168.0.3'], ['192.168.0.6'],
                                            ]):
                 self.assertTrue(ipset.check('set', 'entry'))
                 self.assertFalse(ipset.check('set', 'entry'))
@@ -202,6 +202,19 @@ comment support')
                 self.assertFalse(ipset.check('set', '192.168.0.4-192.168.0.5'))
                 self.assertFalse(ipset.check('set', '192.168.0.4-192.168.0.5'))

+        with patch.object(ipset, '_find_set_type', return_value='hash:net'):
+            with patch.object(ipset, '_find_set_members',
+                              side_effect=['entry', '',
+                                           '192.168.0.4/31', '192.168.0.4/30',
+                                           '192.168.0.4/31', '192.168.0.4/30',
+                                           ]):
+                self.assertTrue(ipset.check('set', 'entry'))
+                self.assertFalse(ipset.check('set', 'entry'))
+                self.assertTrue(ipset.check('set', '192.168.0.4/31'))
+                self.assertFalse(ipset.check('set', '192.168.0.4/31'))
+                self.assertTrue(ipset.check('set', '192.168.0.4-192.168.0.5'))
+                self.assertFalse(ipset.check('set', '192.168.0.4-192.168.0.5'))
+
     def test_test(self):
         '''
         Test for Test if an entry is in the specified set.
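The new hash:net cases work because side_effect hands _find_set_members a fresh return value on every call, so each assertTrue/assertFalse pair consumes one entry from the list. A stripped-down illustration of that mock behaviour, independent of the ipset module; it assumes the standalone mock package the test suite already depends on.

# Hedged illustration of successive side_effect return values.
from mock import MagicMock

members = MagicMock(side_effect=['192.168.0.4/31', '192.168.0.4/30'])
print(members())  # first call  -> 192.168.0.4/31
print(members())  # second call -> 192.168.0.4/30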
@@ -67,7 +67,7 @@ opts = salt.config.DEFAULT_MINION_OPTS
 ctx = {}
 utils = salt.loader.utils(opts, context=ctx, whitelist=['boto'])
 funcs = salt.loader.minion_mods(opts, context=ctx, utils=utils, whitelist=['boto_vpc'])
-salt_states = salt.loader.states(opts=opts, functions=funcs, whitelist=['boto_vpc'])
+salt_states = salt.loader.states(opts=opts, functions=funcs, utils=utils, whitelist=['boto_vpc'])


 def _has_required_boto():