mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
Merge branch '2015.8' into 'develop'
Conflicts:
- salt/beacons/service.py
- salt/cloud/clouds/vmware.py
- salt/modules/file.py
- salt/modules/ipset.py
- salt/modules/npm.py
- salt/modules/rpm.py
- salt/states/file.py
- salt/utils/event.py
- salt/utils/schedule.py
- tests/unit/modules/state_test.py
commit 39b49621ad
52 changed files with 943 additions and 446 deletions
41 doc/faq.rst

@@ -131,13 +131,6 @@ should be opened on our tracker_, with the following information:
 .. _tracker: https://github.com/saltstack/salt/issues
 
-I'm using gitfs and my custom modules/states/etc are not syncing. Why?
-----------------------------------------------------------------------
-
-In versions of Salt 0.16.3 or older, there is a bug in :doc:`gitfs
-</topics/tutorials/gitfs>` which can affect the syncing of custom types.
-Upgrading to 0.16.4 or newer will fix this.
-
 Why aren't my custom modules/states/etc. available on my Minions?
 -----------------------------------------------------------------
 
@@ -199,6 +192,40 @@ backup_mode can be configured on a per state basis, or in the minion config
 (note that if set in the minion config this would simply be the default
 method to use, you still need to specify that the file should be backed up!).
 
+Is it possible to deploy a file to a specific minion, without other minions having access to it?
+------------------------------------------------------------------------------------------------
+
+The Salt fileserver does not yet support access control, but it is still
+possible to do this. As of Salt 2015.5.0, the
+:mod:`file_tree <salt.pillar.file_tree>` external pillar is available, and
+allows the contents of a file to be loaded as Pillar data. This external pillar
+is capable of assigning Pillar values both to individual minions, and to
+:ref:`nodegroups <targeting-nodegroups>`. See the :mod:`documentation
+<salt.pillar.file_tree>` for details on how to set this up.
+
+Once the external pillar has been set up, the data can be pushed to a minion
+via a :py:func:`file.managed <salt.states.file.managed>` state, using the
+``contents_pillar`` argument:
+
+.. code-block:: yaml
+
+    /etc/my_super_secret_file:
+      file.managed:
+        - user: secret
+        - group: secret
+        - mode: 600
+        - contents_pillar: secret_files:my_super_secret_file
+
+In this example, the source file would be located in a directory called
+``secret_files`` underneath the file_tree path for the minion. The syntax for
+specifying the pillar variable is the same one used for :py:func:`pillar.get
+<salt.modules.pillar.get>`, with a colon representing a nested dictionary.
+
+.. warning::
+    Deploying binary contents using the :py:func:`file.managed
+    <salt.states.file.managed>` state is only supported in Salt 2015.8.4 and
+    newer.
+
 What is the best way to restart a Salt daemon using Salt?
 ---------------------------------------------------------
 
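For readers following the FAQ answer above: a minimal master-side configuration for the :mod:`file_tree <salt.pillar.file_tree>` external pillar might look like the following sketch (the ``root_dir`` path is an assumption; any directory readable by the master works):

.. code-block:: yaml

    ext_pillar:
      - file_tree:
          root_dir: /srv/ext_pillar

With that layout, the source for the state above would live at ``/srv/ext_pillar/hosts/<minion_id>/secret_files/my_super_secret_file``.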
@@ -68,7 +68,7 @@ allow minions ending with foo.org access to the publisher.
             - pkg.*
 
 .. note::
-    Minions are matched using regular expressions when configuring ``peer``.
+    Functions are matched using regular expressions.
 
 Peer Runner Communication
 =========================
 
@@ -97,7 +97,7 @@ to the manage and jobs runner functions.
             - jobs.*
 
 .. note::
-    Minions are matched using regular expressions when configuring ``peer_run``.
+    Functions are matched using regular expressions.
 
 Using Peer Communication
 ========================
 
@@ -74,12 +74,12 @@ the grains in map files will override grains in the profile. For example:
       image: CentOS 6.2 (64-bit) w/ None
       os: RHEL6
       minion:
-        master: salt.mycompany.com
+        master: salt.example.com
       grains:
         french: fries
 
 In this example, mynodename will include grains for both fromage and french,
-but the master will be salt-master, not salt-mycompany.com.
+but the master will be salt-master, not salt.example.com.
 
 AWS Improvements
 ================
 
@@ -124,7 +124,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
             ip: 10.20.30.123
             gateway: [10.20.30.110]
             subnet_mask: 255.255.255.128
-            domain: mycompany.com
+            domain: example.com
         Network adapter 2:
           name: 10.30.40-500-Dev-DHCP
           adapter_type: e1000
 
@@ -136,7 +136,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
             ip: 10.40.50.123
             gateway: [10.40.50.110]
             subnet_mask: 255.255.255.128
-            domain: mycompany.com
+            domain: example.com
       scsi:
         SCSI controller 1:
           type: lsilogic
 
@@ -150,7 +150,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
         IDE 2
         IDE 3
 
-      domain: mycompany.com
+      domain: example.com
       dns_servers:
         - 123.127.255.240
         - 123.127.255.241
 
@@ -57,6 +57,5 @@ few functions that dave can execute on other minions. It also allows steve
 unrestricted access to salt commands.
 
 .. note::
-    Minions are matched using regular expressions when configuring
-    ``client_acl`` and ``external_auth``.
+    Functions are matched using regular expressions.
 
19 doc/topics/releases/2015.8.4.rst Normal file

@@ -0,0 +1,19 @@
+===========================
+Salt 2015.8.4 Release Notes
+===========================
+
+Core Changes
+============
+
+- Added option ``mock=True`` for :mod:`state.sls <salt.modules.state.sls>` and
+  :mod:`state.highstate <salt.modules.state.highstate>`. This allows the salt
+  state compiler to process sls data in a state run without actually calling
+  the state functions, thus providing feedback on the validity of the arguments
+  used for the functions beyond the preprocessing validation provided by
+  ``state.show_sls`` (:issue:`30118` and :issue:`30189`).
+
+  .. code-block:: shell
+
+      salt '*' state.sls core,edit.vim mock=True
+      salt '*' state.highstate mock=True
+      salt '*' state.apply edit.vim mock=True
@@ -9,6 +9,7 @@ from __future__ import absolute_import, print_function, generators
 import os
 import copy
 import glob
+import inspect
 import time
 import signal
 import logging
 
@@ -1477,8 +1478,15 @@ class Cloud(object):
                 ret[alias][driver] = {}
 
             if kwargs:
+                argnames = inspect.getargspec(self.clouds[fun]).args
+                for _ in inspect.getargspec(self.clouds[fun]).defaults:
+                    argnames.pop(0)
+                kws = {}
+                for kwarg in argnames:
+                    kws[kwarg] = kwargs.get(kwarg, None)
+                kws['call'] = 'action'
                 ret[alias][driver][vm_name] = self.clouds[fun](
-                    vm_name, kwargs, call='action'
+                    vm_name, **kws
                 )
             else:
                 ret[alias][driver][vm_name] = self.clouds[fun](
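A quick, self-contained illustration of the argument filtering introduced above (the sample function is hypothetical; ``inspect.getargspec`` is the Python 2-era API the diff itself uses):

.. code-block:: python

    import inspect

    def stop(name, call=None):  # stand-in for a cloud driver action function
        return name, call

    argnames = inspect.getargspec(stop).args      # ['name', 'call']
    for _ in inspect.getargspec(stop).defaults:   # one default -> pop once
        argnames.pop(0)                           # drops the leading positional arg
    print(argnames)  # ['call'] -- only these names are then pulled from kwargs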
@@ -3815,6 +3815,21 @@ def attach_volume(name=None, kwargs=None, instance_id=None, call=None):
                      provider=get_provider(),
                      opts=__opts__,
                      sigver='4')
+    while data[0]:
+        log.warn(
+            ('Error attaching volume {0} '
+             'to instance {1}. Retrying!').format(kwargs['volume_id'],
+                                                  instance_id))
+        # Instance isn't running yet, so cannot attach this volume to it
+        # wait for instance to run and try again
+        time.sleep(10)
+        data = aws.query(params,
+                         return_url=True,
+                         location=get_location(),
+                         provider=get_provider(),
+                         opts=__opts__,
+                         sigver='4')
 
     return data
 
@@ -293,7 +293,7 @@ def _add_new_hard_disk_helper(disk_label, size_gb, unit_number, controller_key=1
     return disk_spec
 
 
-def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_type, switch_type):
+def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_type, switch_type, container_ref=None):
     adapter_type.strip().lower()
     switch_type.strip().lower()
 
@@ -310,7 +310,7 @@ def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_ty
         edited_network_adapter = network_adapter
 
     if switch_type == 'standard':
-        network_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Network, new_network_name)
+        network_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Network, new_network_name, container_ref=container_ref)
         edited_network_adapter.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
         edited_network_adapter.backing.deviceName = new_network_name
         edited_network_adapter.backing.network = network_ref
 
@@ -349,7 +349,7 @@ def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_ty
     return network_spec
 
 
-def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type):
+def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type, container_ref=None):
     random_key = randint(-4099, -4000)
 
     adapter_type.strip().lower()
 
@@ -373,11 +373,13 @@ def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter
         network_spec.device.backing.deviceName = network_name
         network_spec.device.backing.network = salt.utils.vmware.get_mor_by_property(_get_si(),
                                                                                     vim.Network,
-                                                                                    network_name)
+                                                                                    network_name,
+                                                                                    container_ref=container_ref)
     elif switch_type == 'distributed':
         network_ref = salt.utils.vmware.get_mor_by_property(_get_si(),
                                                             vim.dvs.DistributedVirtualPortgroup,
-                                                            network_name)
+                                                            network_name,
+                                                            container_ref=container_ref)
         dvs_port_connection = vim.dvs.PortConnection(
             portgroupKey=network_ref.key,
             switchUuid=network_ref.config.distributedVirtualSwitch.uuid
 
@@ -657,7 +659,7 @@ def _manage_devices(devices, vm=None):
             adapter_type = devices['network'][network_adapter_label]['adapter_type'] if 'adapter_type' in devices['network'][network_adapter_label] else ''
             switch_type = devices['network'][network_adapter_label]['switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else ''
             # create the network adapter
-            network_spec = _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type)
+            network_spec = _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type, container_ref)
             adapter_mapping = _set_network_adapter_mapping(devices['network'][network_adapter_label])
             device_specs.append(network_spec)
             nics_map.append(adapter_mapping)
 
@@ -2162,8 +2164,14 @@ def create(vm_):
     )
 
     if 'clonefrom' in vm_:
+        # If datacenter is specified, set the container reference to start search from it instead
+        container_ref = None
+        if datacenter:
+            datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter)
+            container_ref = datacenter_ref if datacenter_ref else None
+
         # Clone VM/template from specified VM/template
-        object_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, vm_['clonefrom'])
+        object_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, vm_['clonefrom'], container_ref=container_ref)
         if object_ref:
             clone_type = "template" if object_ref.config.template else "vm"
         else:
 
@@ -2234,12 +2242,12 @@ def create(vm_):
         # Either a datastore/datastore cluster can be optionally specified.
         # If not specified, the current datastore is used.
         if datastore:
-            datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore)
+            datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore, container_ref=container_ref)
             if datastore_ref:
                 # specific datastore has been specified
                 reloc_spec.datastore = datastore_ref
             else:
-                datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.StoragePod, datastore)
+                datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.StoragePod, datastore, container_ref=container_ref)
                 if not datastore_cluster_ref:
                     log.error("Specified datastore/datastore cluster: '{0}' does not exist".format(datastore))
                     log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom']))
 
@@ -2248,7 +2256,7 @@ def create(vm_):
             log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom']))
 
         if host:
-            host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host)
+            host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host, container_ref=container_ref)
             if host_ref:
                 reloc_spec.host = host_ref
             else:
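Taken together, these hunks thread an optional ``container_ref`` through the managed-object lookups so searches can be scoped to one datacenter. A rough sketch of the resulting calling pattern (the datacenter and template names are hypothetical):

.. code-block:: python

    si = _get_si()  # service instance, as elsewhere in the module
    # Resolve the datacenter first...
    dc_ref = salt.utils.vmware.get_mor_by_property(si, vim.Datacenter, 'dc1')
    # ...then later lookups search only inside 'dc1' instead of the whole inventory:
    vm_ref = salt.utils.vmware.get_mor_by_property(
        si, vim.VirtualMachine, 'my-template', container_ref=dc_ref)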
@@ -437,8 +437,17 @@ def _refresh_buckets_cache_file(cache_file):
                     continue
                 except KeyError:
                     # no human readable error message provided
-                    log.warning("'{0}' response for bucket '{1}'".format(meta_response['Code'], bucket_name))
-                    continue
+                    if 'Code' in meta_response:
+                        log.warning(
+                            ("'{0}' response for "
+                             "bucket '{1}'").format(meta_response['Code'],
+                                                    bucket_name))
+                        continue
+                    else:
+                        log.warning(
+                            ('S3 Error! Do you have any files '
+                             'in your S3 bucket?'))
+                        return {}
 
         metadata[saltenv] = bucket_files
 
@@ -467,8 +476,17 @@ def _refresh_buckets_cache_file(cache_file):
                     continue
                 except KeyError:
                     # no human readable error message provided
-                    log.warning("'{0}' response for bucket '{1}'".format(meta_response['Code'], bucket_name))
-                    continue
+                    if 'Code' in meta_response:
+                        log.warning(
+                            ("'{0}' response for "
+                             "bucket '{1}'").format(meta_response['Code'],
+                                                    bucket_name))
+                        continue
+                    else:
+                        log.warning(
+                            ('S3 Error! Do you have any files '
+                             'in your S3 bucket?'))
+                        return {}
 
         environments = [(os.path.dirname(k['Key']).split('/', 1))[0] for k in files]
         environments = set(environments)
 
@@ -1313,10 +1313,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
         # containing the names of the proxy types that the module supports.
         #
         # Render modules and state modules are OK though
-        if 'proxymodule' in self.opts:
+        if 'proxy' in self.opts:
             if self.tag not in ['render', 'states', 'utils']:
                 if not hasattr(mod, '__proxyenabled__') or \
-                        (self.opts['proxymodule'].loaded_base_name not in mod.__proxyenabled__ and
+                        (self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
                          '*' not in mod.__proxyenabled__):
                     err_string = 'not a proxy_minion enabled module'
                     self.missing_modules[module_name] = err_string
@@ -753,7 +753,8 @@ class Minion(MinionBase):
         )
         # Late setup the of the opts grains, so we can log from the grains
         # module. If this is a proxy, however, we need to init the proxymodule
-        # before we can get the grains.
+        # before we can get the grains. We do this for proxies in the
+        # post_master_init
         if not salt.utils.is_proxy():
             self.opts['grains'] = salt.loader.grains(opts)
 
@@ -1844,7 +1845,8 @@ class Minion(MinionBase):
                     log.critical('The beacon errored: ', exc_info=True)
                 if beacons:
                     self._fire_master(events=beacons)
-            self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
+
+        self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
 
         # TODO: actually listen to the return and change period
         def handle_schedule():
 
@@ -2702,7 +2704,13 @@ class ProxyMinion(Minion):
 
         proxy_init_fn = self.proxy[fq_proxyname+'.init']
         proxy_init_fn(self.opts)
-        self.opts['grains'] = salt.loader.grains(self.opts)
+
+        # Proxies have a chicken-and-egg problem. Usually we load grains early
+        # in the setup process, but we can't load grains for proxies until
+        # we talk to the device we are proxying for. So reload the grains
+        # functions here, and then force a grains sync.
+        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh=True)
+        self.functions['saltutil.sync_grains'](saltenv='base')
 
         # Check config 'add_proxymodule_to_opts' Remove this in Boron.
         if self.opts['add_proxymodule_to_opts']:
@@ -981,7 +981,7 @@ def get_attribute(attribute, instance_name=None, instance_id=None, region=None,
 
     .. code-block:: bash
 
-        salt myminion boto_ec2.get_attribute name=my_instance attribute=sourceDestCheck
+        salt myminion boto_ec2.get_attribute sourceDestCheck instance_name=my_instance
 
     Available attributes:
         * instanceType
 
@@ -1039,8 +1039,7 @@ def set_attribute(attribute, attribute_value, instance_name=None, instance_id=No
 
     .. code-block:: bash
 
-        salt myminion boto_ec2.set_attribute instance_name=my_instance \
-                attribute=sourceDestCheck attribute_value=False
+        salt myminion boto_ec2.set_attribute sourceDestCheck False instance_name=my_instance
 
     Available attributes:
         * instanceType
 
@@ -125,10 +125,10 @@ def __virtual__():
         return True
 
 
-def __init__(opts):
+def __init__(opts, pack=None):
     salt.utils.compat.pack_dunder(__name__)
     if HAS_BOTO:
-        __utils__['boto.assign_funcs'](__name__, 'vpc')
+        __utils__['boto.assign_funcs'](__name__, 'vpc', pack=pack)
 
 
 def check_vpc(vpc_id=None, vpc_name=None, region=None, key=None,
@@ -625,10 +625,17 @@ def _run_all_quiet(cmd,
                    reset_system_locale=True,
                    saltenv='base',
                    pillarenv=None,
-                   pillar_override=None):
+                   pillar_override=None,
+                   output_loglevel=None):
 
     '''
     Helper for running commands quietly for minion startup.
-    Returns a dict of return data
+    Returns a dict of return data.
+
+    output_loglevel argument is ignored. This is here for when we alias
+    cmd.run_all directly to _run_all_quiet in certain chicken-and-egg
+    situations where modules need to work both before and after
+    the __salt__ dictionary is populated (cf dracr.py)
     '''
     return _run(cmd,
                 runas=runas,
@@ -23,6 +23,8 @@ import salt.ext.six as six
 
 log = logging.getLogger(__name__)
 
+__proxyenabled__ = ['*']
+
 
 def _auth():
     '''
 
@@ -90,7 +90,8 @@ def __execute_cmd(command, host=None,
                                            admin_username,
                                            admin_password,
                                            command,
-                                           modswitch))
+                                           modswitch),
+                                           output_loglevel='quiet')
 
     if cmd['retcode'] != 0:
         log.warning('racadm return an exit code \'{0}\'.'
 
@@ -123,7 +124,8 @@ def __execute_ret(command, host=None,
                                            admin_username,
                                            admin_password,
                                            command,
-                                           modswitch))
+                                           modswitch),
+                                           output_loglevel='quiet')
 
     if cmd['retcode'] != 0:
         log.warning('racadm return an exit code \'{0}\'.'
@@ -1661,21 +1661,28 @@ def replace(path,
     '''
     .. versionadded:: 0.17.0
 
-    Replace occurrences of a pattern in a file
+    Replace occurrences of a pattern in a file. If ``show_changes`` is
+    ``True``, then a diff of what changed will be returned, otherwise a
+    ``True`` will be returned when changes are made, and ``False`` when
+    no changes are made.
+
+    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
 
     path
         Filesystem path to the file to be edited
 
     pattern
-        Python's regular expression search
-        https://docs.python.org/2/library/re.html
+        A regular expression, to be matched using Python's
+        :py:func:`~re.search`.
 
     repl
         The replacement text
-    count
-        Maximum number of pattern occurrences to be replaced. Defaults to 0.
-        If count is a positive integer n, only n occurrences will be replaced,
+
+    count : 0
+        Maximum number of pattern occurrences to be replaced. If count is a
+        positive integer ``n``, only ``n`` occurrences will be replaced,
         otherwise all occurrences will be replaced.
 
     flags (list or int)
         A list of flags defined in the :ref:`re module documentation
         <contents-of-module-re>`. Each list item should be a string that will
 
@@ -1683,65 +1690,71 @@ def replace(path,
         'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
         corresponding to the XOR (``|``) of all the desired flags. Defaults to
        8 (which supports 'MULTILINE').
 
     bufsize (int or str)
         How much of the file to buffer into memory at once. The
         default value ``1`` processes one line at a time. The special value
         ``file`` may be specified which will read the entire file into memory
         before processing.
-    append_if_not_found
+
+    append_if_not_found : False
         .. versionadded:: 2014.7.0
 
-        If pattern is not found and set to ``True``
-        then, the content will be appended to the file.
-        Default is ``False``
-    prepend_if_not_found
+        If set to ``True``, and pattern is not found, then the content will be
+        appended to the file.
+
+    prepend_if_not_found : False
         .. versionadded:: 2014.7.0
 
-        If pattern is not found and set to ``True``
-        then, the content will be prepended to the file.
-        Default is ``False``
+        If set to ``True`` and pattern is not found, then the content will be
+        prepended to the file.
 
     not_found_content
         .. versionadded:: 2014.7.0
 
-        Content to use for append/prepend if not found. If
-        None (default), uses ``repl``. Useful when ``repl`` uses references to group in
-        pattern.
-    backup
-        The file extension to use for a backup of the file before
-        editing. Set to ``False`` to skip making a backup. Default
-        is ``.bak``
-    dry_run
-        Don't make any edits to the file, Default is ``False``
-    search_only
-        Just search for the pattern; ignore the replacement;
-        stop on the first match. Default is ``False``
-    show_changes
-        Output a unified diff of the old file and the new
-        file. If ``False`` return a boolean if any changes were made.
-        Default is ``True``
+        Content to use for append/prepend if not found. If None (default), uses
+        ``repl``. Useful when ``repl`` uses references to group in pattern.
+
+    backup : .bak
+        The file extension to use for a backup of the file before editing. Set
+        to ``False`` to skip making a backup.
+
+    dry_run : False
+        If set to ``True``, no changes will be made to the file, the function
+        will just return the changes that would have been made (or a
+        ``True``/``False`` value if ``show_changes`` is set to ``False``).
+
+    search_only : False
+        If set to true, no changes will be performed on the file, and this
+        function will simply return ``True`` if the pattern was matched, and
+        ``False`` if not.
+
+    show_changes : True
+        If ``True``, return a diff of changes made. Otherwise, return ``True``
+        if changes were made, and ``False`` if not.
 
         .. note::
-            Using this option will store two copies of the file in memory (the
-            original version and the edited version) in order to generate the
-            diff. This may not normally be a concern, but could impact
-            performance if used with large files.
+            Using this option will store two copies of the file in-memory
+            (the original version and the edited version) in order to generate
+            the diff.
 
-    ignore_if_missing
+    ignore_if_missing : False
         .. versionadded:: 2015.8.0
 
-        When this parameter is ``True``, ``file.replace`` will return ``False`` if the
-        file doesn't exist. When this parameter is ``False``, ``file.replace`` will
-        throw an error if the file doesn't exist.
-        Default is ``False`` (to maintain compatibility with prior behaviour).
-    preserve_inode
+        If set to ``True``, this function will simply return ``False``
+        if the file doesn't exist. Otherwise, an error will be thrown.
+
+    preserve_inode : True
         .. versionadded:: 2015.8.0
 
-        Preserve the inode of the file, so that any hard links continue to share the
-        inode with the original filename. This works by *copying* the file, reading
-        from the copy, and writing to the file at the original inode. If ``False``, the
-        file will be *moved* rather than copied, and a new file will be written to a
-        new inode, but using the original filename. Hard links will then share an inode
-        with the backup, instead (if using ``backup`` to create a backup copy).
-        Default is ``True``.
+        Preserve the inode of the file, so that any hard links continue to
+        share the inode with the original filename. This works by *copying* the
+        file, reading from the copy, and writing to the file at the original
+        inode. If ``False``, the file will be *moved* rather than copied, and a
+        new file will be written to a new inode, but using the original
+        filename. Hard links will then share an inode with the backup, instead
+        (if using ``backup`` to create a backup copy).
 
     If an equal sign (``=``) appears in an argument to a Salt command it is
     interpreted as a keyword argument in the format ``key=val``. That
 
@@ -1781,10 +1794,14 @@ def replace(path,
     )
 
     if search_only and (append_if_not_found or prepend_if_not_found):
-        raise SaltInvocationError('Choose between search_only and append/prepend_if_not_found')
+        raise SaltInvocationError(
+            'search_only cannot be used with append/prepend_if_not_found'
+        )
 
     if append_if_not_found and prepend_if_not_found:
-        raise SaltInvocationError('Choose between append or prepend_if_not_found')
+        raise SaltInvocationError(
+            'Only one of append and prepend_if_not_found is permitted'
+        )
 
     flags_num = _get_flags(flags)
     cpattern = re.compile(str(pattern), flags_num)
 
@@ -2061,7 +2078,9 @@ def blockreplace(path,
         raise SaltInvocationError('File not found: {0}'.format(path))
 
     if append_if_not_found and prepend_if_not_found:
-        raise SaltInvocationError('Choose between append or prepend_if_not_found')
+        raise SaltInvocationError(
+            'Only one of append and prepend_if_not_found is permitted'
+        )
 
     if not salt.utils.istextfile(path):
         raise SaltInvocationError(
 
@@ -3114,7 +3133,8 @@ def rmdir(path):
 
 def remove(path):
     '''
-    Remove the named file
+    Remove the named file. If a directory is supplied, it will be recursively
+    deleted.
 
     CLI Example:
 
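A hypothetical invocation tying the reworked ``file.replace`` parameter docs together (the path and pattern are made up for illustration):

.. code-block:: bash

    # Preview the change as a diff without touching the file:
    salt '*' file.replace /etc/httpd/conf/httpd.conf pattern='LogLevel warn' repl='LogLevel info' dry_run=True

    # Apply it, returning only True/False instead of a diff:
    salt '*' file.replace /etc/httpd/conf/httpd.conf pattern='LogLevel warn' repl='LogLevel info' show_changes=False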
@@ -441,7 +441,7 @@ def remove_masquerade(zone):
     return __firewall_cmd('--zone={0} --remove-masquerade'.format(zone))
 
 
-def add_port(zone, port):
+def add_port(zone, port, permanent=True):
     '''
     Allow specific ports in a zone.
 
@@ -456,10 +456,15 @@ def add_port(zone, port):
     if not get_masquerade(zone):
         add_masquerade(zone)
 
-    return __firewall_cmd('--zone={0} --add-port={1}'.format(zone, port))
+    cmd = '--zone={0} --add-port={1}'.format(zone, port)
+
+    if permanent:
+        cmd += ' --permanent'
+
+    return __firewall_cmd(cmd)
 
 
-def remove_port(zone, port):
+def remove_port(zone, port, permanent=True):
     '''
     Remove a specific port from a zone.
 
@@ -471,7 +476,12 @@ def remove_port(zone, port):
 
         salt '*' firewalld.remove_port internal 443/tcp
     '''
-    return __firewall_cmd('--zone={0} --remove-port={1}'.format(zone, port))
+    cmd = '--zone={0} --remove-port={1}'.format(zone, port)
+
+    if permanent:
+        cmd += ' --permanent'
+
+    return __firewall_cmd(cmd)
 
 
 def list_ports(zone):
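Since ``permanent`` now defaults to ``True``, the prior runtime-only behavior has to be requested explicitly; a sketch using the zone and port from the docstrings above:

.. code-block:: bash

    salt '*' firewalld.add_port internal 443/tcp                  # writes the permanent configuration (new default)
    salt '*' firewalld.add_port internal 443/tcp permanent=False  # changes only the runtime configuration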
@@ -436,7 +436,7 @@ def check(set=None, entry=None, family='ipv4'):
         start, end = _entry.split('-')
 
         if settype == 'hash:ip':
-            entries = [' '.join([str(ipaddress.ip_address(ip)), ' '.join(_entry_extra)]) for ip in long_range(
+            entries = [' '.join([str(ipaddress.ip_address(ip)), ' '.join(_entry_extra)]) for ip in range(
                 ipaddress.ip_address(start),
                 ipaddress.ip_address(end) + 1
             )]
@@ -106,7 +106,7 @@ def install(pkg=None,
     silent
         Whether or not to run NPM install with --silent flag.
 
-        .. versionadded:: 2015.9.0
+        .. versionadded:: Boron
 
     dry_run
         Whether or not to run NPM install with --dry-run flag.
@@ -695,6 +695,10 @@ def user_list(user=None, host=None, port=None, maintenance_db=None,
             replication_column = 'pg_roles.rolreplication'
         else:
             replication_column = 'NULL'
+        if ver >= distutils.version.LooseVersion('9.5'):
+            rolcatupdate_column = 'NULL'
+        else:
+            rolcatupdate_column = 'pg_roles.rolcatupdate'
     else:
         log.error('Could not retrieve Postgres version. Is Postgresql server running?')
         return False
 
@@ -709,9 +713,9 @@ def user_list(user=None, host=None, port=None, maintenance_db=None,
         'pg_roles.rolinherit as "inherits privileges", '
         'pg_roles.rolcreaterole as "can create roles", '
         'pg_roles.rolcreatedb as "can create databases", '
-        'pg_roles.rolcatupdate as "can update system catalogs", '
+        '{0} as "can update system catalogs", '
         'pg_roles.rolcanlogin as "can login", '
-        '{0} as "replication", '
+        '{1} as "replication", '
         'pg_roles.rolconnlimit as "connections", '
         'pg_roles.rolvaliduntil::timestamp(0) as "expiry time", '
         'pg_roles.rolconfig as "defaults variables" '
 
@@ -719,7 +723,7 @@ def user_list(user=None, host=None, port=None, maintenance_db=None,
         'FROM pg_roles '
         , _x('LEFT JOIN pg_authid ON pg_roles.oid = pg_authid.oid ')
         , _x('LEFT JOIN pg_shadow ON pg_roles.oid = pg_shadow.usesysid')
-    ]).format(replication_column))
+    ]).format(rolcatupdate_column, replication_column))
 
     rows = psql_query(query,
                       runas=runas,
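A minimal, runnable sketch of the version gate added above (the version string is an assumption; ``pg_roles.rolcatupdate`` was removed in PostgreSQL 9.5, hence the NULL substitution):

.. code-block:: python

    from distutils.version import LooseVersion

    ver = LooseVersion('9.5.3')
    # On 9.5+ the column no longer exists, so the query selects NULL instead:
    rolcatupdate_column = ('NULL' if ver >= LooseVersion('9.5')
                           else 'pg_roles.rolcatupdate')
    print(rolcatupdate_column)  # NULL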
@@ -8,7 +8,6 @@ from __future__ import absolute_import
 import logging
 import os
 import re
-import time
 import datetime
 
 # Import Salt libs
 
@@ -403,24 +402,20 @@ def diff(package, path):
     return res
 
 
-def _pkg_time_to_iso(pkg_time):
-    '''
-    Convert package time to ISO 8601.
-
-    :param pkg_time:
-    :return:
-    '''
-    ptime = time.strptime(pkg_time, '%a %d %b %Y %H:%M:%S %p %Z')
-    return datetime.datetime(ptime.tm_year, ptime.tm_mon, ptime.tm_mday,
-                             ptime.tm_hour, ptime.tm_min, ptime.tm_sec).isoformat() + "Z"
-
-
-def info(*packages):
+def info(*packages, **attr):
     '''
     Return a detailed package(s) summary information.
     If no packages specified, all packages will be returned.
 
     :param packages:
 
+    :param attr:
+        Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
+
+        Valid attributes are:
+            version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
+            build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
+
+    :return:
+
     CLI example:
 
@@ -428,32 +423,59 @@ def info(*packages):
     .. code-block:: bash
 
         salt '*' lowpkg.info apache2 bash
+        salt '*' lowpkg.info apache2 bash attr=version
+        salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size
     '''
 
     cmd = packages and "rpm -q {0}".format(' '.join(packages)) or "rpm -qa"
 
-    # Locale needs to be en_US instead of C, because RPM otherwise will yank the timezone from the timestamps
-    call = __salt__['cmd.run_all'](cmd + (" --queryformat 'Name: %{NAME}\n"
-                                          "Relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\n"
-                                          "%|EPOCH?{Epoch: %{EPOCH}\n}|"
-                                          "Version: %{VERSION}\n"
-                                          "Vendor: %{VENDOR}\n"
-                                          "Release: %{RELEASE}\n"
-                                          "Architecture: %{ARCH}\n"
-                                          "Build Date: %{BUILDTIME:date}\n"
-                                          "Install Date: %|INSTALLTIME?{%{INSTALLTIME:date}}:{(not installed)}|\n"
-                                          "Build Host: %{BUILDHOST}\n"
-                                          "Group: %{GROUP}\n"
-                                          "Source RPM: %{SOURCERPM}\n"
-                                          "Size: %{LONGSIZE}\n"
-                                          "%|LICENSE?{License: %{LICENSE}\n}|"
-                                          "Signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\n"
-                                          "%|PACKAGER?{Packager: %{PACKAGER}\n}|"
-                                          "%|URL?{URL: %{URL}\n}|"
-                                          "Summary: %{SUMMARY}\n"
-                                          "Description:\n%{DESCRIPTION}\n"
-                                          "-----\n'"),
-                                   output_loglevel='trace', env={'LC_ALL': 'en_US', 'TZ': 'UTC'}, clean_env=True)
+    # Construct query format
+    attr_map = {
+        "name": "name: %{NAME}\\n",
+        "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n",
+        "version": "version: %{VERSION}\\n",
+        "vendor": "vendor: %{VENDOR}\\n",
+        "release": "release: %{RELEASE}\\n",
+        "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|",
+        "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n",
+        "build_date": "build_date: %{BUILDTIME}\\n",
+        "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n",
+        "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n",
+        "build_host": "build_host: %{BUILDHOST}\\n",
+        "group": "group: %{GROUP}\\n",
+        "source_rpm": "source_rpm: %{SOURCERPM}\\n",
+        "size": "size: %{LONGSIZE}\\n",
+        "arch": "arch: %{ARCH}\\n",
+        "license": "%|LICENSE?{license: %{LICENSE}\\n}|",
+        "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:"
+                     "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n",
+        "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|",
+        "url": "%|URL?{url: %{URL}\\n}|",
+        "summary": "summary: %{SUMMARY}\\n",
+        "description": "description:\\n%{DESCRIPTION}\\n",
+    }
+
+    attr = attr.get('attr', None) and attr['attr'].split(",") or None
+    query = list()
+    if attr:
+        for attr_k in attr:
+            if attr_k in attr_map and attr_k != 'description':
+                query.append(attr_map[attr_k])
+        if not query:
+            raise CommandExecutionError('No valid attributes found.')
+        if 'name' not in attr:
+            attr.append('name')
+            query.append(attr_map['name'])
+    else:
+        for attr_k, attr_v in attr_map.iteritems():
+            if attr_k != 'description':
+                query.append(attr_v)
+    if attr and 'description' in attr or not attr:
+        query.append(attr_map['description'])
+    query.append("-----\\n")
+
+    call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))),
+                                   output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True)
     if call['retcode'] != 0:
         comment = ''
         if 'stderr' in call:
 
@@ -483,17 +505,31 @@ def info(*packages):
             if len(line) != 2:
                 continue
             key, value = line
             key = key.replace(' ', '_').lower()
             if key == 'description':
                 descr_marker = True
                 continue
             if key == 'name':
                 pkg_name = value
 
             # Convert Unix ticks into ISO time format
             if key in ['build_date', 'install_date']:
-                value = _pkg_time_to_iso(value)
-            if key != 'description' and value:
+                try:
+                    pkg_data[key] = datetime.datetime.fromtimestamp(int(value)).isoformat() + "Z"
+                except ValueError:
+                    log.warning('Could not convert "{0}" into Unix time'.format(value))
+                continue
+
+            # Convert Unix ticks into an Integer
+            if key in ['build_date_time_t', 'install_date_time_t']:
+                try:
+                    pkg_data[key] = int(value)
+                except ValueError:
+                    log.warning('Could not convert "{0}" into Unix time'.format(value))
+                continue
+            if key not in ['description', 'name'] and value:
                 pkg_data[key] = value
-        pkg_data['description'] = os.linesep.join(descr)
+        if attr and 'description' in attr or not attr:
+            pkg_data['description'] = os.linesep.join(descr)
         if pkg_name:
             ret[pkg_name] = pkg_data
 
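To make the query construction above concrete, here is a trimmed-down, runnable sketch (``attr_map`` reduced to two of the entries shown in the diff; the CLI value is an assumption):

.. code-block:: python

    attr_map = {
        "name": "name: %{NAME}\\n",
        "version": "version: %{VERSION}\\n",
    }

    attr = "version"  # as passed on the CLI: attr=version
    wanted = attr.split(",") if attr else None

    query = []
    for attr_k in wanted or []:
        if attr_k in attr_map and attr_k != 'description':
            query.append(attr_map[attr_k])
    if wanted and 'name' not in wanted:
        # name is always queried so results can be keyed per package
        query.append(attr_map['name'])
    query.append("-----\\n")

    print("rpm -qa --queryformat '{0}'".format(''.join(query)))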
@ -57,7 +57,8 @@ def prep_bootstrap(mpt):
|
|||
fp_ = os.path.join(fpd_, os.path.basename(bs_))
|
||||
# Copy script into tmp
|
||||
shutil.copy(bs_, fp_)
|
||||
return fp_
|
||||
tmppath = fpd_.replace(mpt, '')
|
||||
return fp_, tmppath
|
||||
|
||||
|
||||
def _mount(path, ftype):
|
||||
|
@ -237,11 +238,11 @@ def _install(mpt):
|
|||
'''
|
||||
|
||||
_check_resolv(mpt)
|
||||
boot_ = (prep_bootstrap(mpt)
|
||||
boot_, tmppath = (prep_bootstrap(mpt)
|
||||
or salt.syspaths.BOOTSTRAP)
|
||||
# Exec the chroot command
|
||||
cmd = 'if type salt-minion; then exit 0; '
|
||||
cmd += 'else sh {0} -c /tmp; fi'.format(salt.syspaths.BOOTSTRAP)
|
||||
cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
|
||||
return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
|
||||
|
||||
|
||||
|
|
|
@@ -509,6 +509,8 @@ def highstate(test=None,
         calling any states. This then returns a mocked return which will show
         the requisite ordering as well as fully validate the state run.
 
+        .. versionadded:: 2015.8.4
+
     CLI Example:
 
     .. code-block:: bash
 
@@ -676,6 +678,8 @@ def sls(mods,
         calling any states. This then returns a mocked return which will show
         the requisite ordering as well as fully validate the state run.
 
+        .. versionadded:: 2015.8.4
+
     CLI Example:
 
     .. code-block:: bash
 
@@ -9,6 +9,7 @@ import errno
 import os
 import locale
 import logging
+import time
 from distutils.version import LooseVersion  # pylint: disable=import-error,no-name-in-module
 
 # Import third party libs
 
@@ -785,11 +786,13 @@ def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
     new = list_pkgs()
     tries = 0
     difference = salt.utils.compare_dicts(old, new)
-    while not all(name in difference for name in changed) and tries <= 1000:
+    while not all(name in difference for name in changed) and tries < 10:
+        time.sleep(3)
         new = list_pkgs()
         difference = salt.utils.compare_dicts(old, new)
         tries += 1
-        if tries == 1000:
-            log.debug("Try {0}".format(tries))
+        if tries == 10:
+            ret['_comment'] = 'Registry not updated.'
 
     # Compare the software list before and after
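The hunk above replaces a 1000-iteration busy-loop with ten 3-second polls; the general pattern, reduced to a runnable sketch (the predicate and timings are illustrative, not Salt APIs):

.. code-block:: python

    import time

    def poll(predicate, tries=10, delay=3):
        '''Return True as soon as predicate() holds, or False after `tries` attempts.'''
        for _ in range(tries):
            if predicate():
                return True
            time.sleep(delay)
        return False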
@@ -515,7 +515,7 @@ def join_domain(domain,
 
     :param str domain:
         The domain to which the computer should be joined, e.g.
-        ``my-company.com``
+        ``example.com``
 
     :param str username:
         Username of an account which is authorized to join computers to the
@@ -99,19 +99,32 @@ def list_upgrades(refresh=True):
 list_updates = salt.utils.alias_function(list_upgrades, 'list_updates')
 
 
-def info_installed(*names):
+def info_installed(*names, **attr):
     '''
     Return the information of the named package(s), installed on the system.
 
     :param names:
         Names of the packages to get information about.
 
+    :param attr:
+        Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
+
+        Valid attributes are:
+            version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
+            build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url,
+            summary, description.
+
     CLI example:
 
     .. code-block:: bash
 
         salt '*' pkg.info_installed <package1>
         salt '*' pkg.info_installed <package1> <package2> <package3> ...
+        salt '*' pkg.info_installed <package1> attr=version,vendor
+        salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor
     '''
     ret = dict()
-    for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names).items():
+    for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, **attr).items():
         t_nfo = dict()
         # Translate dpkg-specific keys to a common structure
         for key, value in pkg_nfo.items():
@@ -395,7 +395,7 @@ class Pillar(object):
                     if isinstance(comp, six.string_types):
                         states[comp] = True
                 if ignore_missing:
-                    self.ignored_pillars[saltenv] = list(states.keys())
+                    self.ignored_pillars[saltenv].extend(states.keys())
                 top[saltenv][tgt] = matches
                 top[saltenv][tgt].extend(states)
         return self.sort_top_targets(top, orders)
@@ -1,9 +1,11 @@
 # -*- coding: utf-8 -*-
 '''
-Recursively iterate over directories and add all files as Pillar data.
+Recursively iterate over directories and add all files as Pillar data
 
-Example configuration:
+.. versionadded:: 2015.5.0
+
+Example Configuration
+---------------------
 
 .. code-block:: yaml
 
@@ -11,24 +13,54 @@ Example configuration:
     - file_tree:
         root_dir: /path/to/root/directory
         follow_dir_links: False
-        raw_data: False
+        keep_newline: True
 
 The ``root_dir`` parameter is required and points to the directory where files
-for each host are stored. The ``follow_dir_links`` parameter is optional
-and defaults to False. If ``follow_dir_links`` is set to True, file_tree will
-follow symbolic links to other directories. Be careful when using
-``follow_dir_links``, the current implementation is dumb and will run into
-infinite recursion if a recursive symlink chain exists in the root_dir!
-
-If ``raw_data`` is set to True, it will revert the behavior of the python
-open() function, which adds a line break character at the end of the file,
-in this case, the pillar data.
-
-To fill pillar data for each host, file_tree recursively iterates over
-``root_dir``/hosts/``id`` (where ``id`` is a minion ID), and constructs
-the same directory tree with contents of all the files inside the pillar tree.
-
-For example, the following ``root_dir`` tree::
+for each host are stored. The ``follow_dir_links`` parameter is optional and
+defaults to False. If ``follow_dir_links`` is set to True, this external pillar
+will follow symbolic links to other directories.
+
+.. warning::
+    Be careful when using ``follow_dir_links``, as a recursive symlink chain
+    will result in unexpected results.
+
+If ``keep_newline`` is set to ``True``, then the pillar values for files ending
+in newlines will keep that newline. The default behavior is to remove the
+end-of-file newline. ``keep_newline`` should be turned on if the pillar data is
+intended to be used to deploy a file using ``contents_pillar`` with a
+:py:func:`file.managed <salt.states.file.managed>` state.
+
+.. versionchanged:: 2015.8.4
+    The ``raw_data`` parameter has been renamed to ``keep_newline``. In earlier
+    releases, ``raw_data`` must be used. Also, this parameter can now be a list
+    of globs, allowing for more granular control over which pillar values keep
+    their end-of-file newline. The globs match paths relative to the
+    directories named for minion IDs and nodegroups underneath the ``root_dir``
+    (see the layout examples in the below sections).
+
+    .. code-block:: yaml
+
+        ext_pillar:
+          - file_tree:
+              root_dir: /path/to/root/directory
+              keep_newline:
+                - files/testdir/*
+
+.. note::
+    Binary files are not affected by the ``keep_newline`` configuration.
+
+
+Assigning Pillar Data to Individual Hosts
+-----------------------------------------
+
+To configure pillar data for each host, this external pillar will recursively
+iterate over ``root_dir``/hosts/``id`` (where ``id`` is a minion ID), and
+compile pillar data with each subdirectory as a dictionary key and each file
+as a value.
+
+For example, the following ``root_dir`` tree:
 
 .. code-block:: text
 
     ./hosts/
     ./hosts/test-host/
 
@@ -39,7 +71,9 @@ For example, the following ``root_dir`` tree::
     ./hosts/test-host/files/another-testdir/
     ./hosts/test-host/files/another-testdir/symlink-to-file1.txt
 
-will result in the following pillar tree for minion with ID "test-host"::
+will result in the following pillar tree for minion with ID ``test-host``:
+
+.. code-block:: text
 
     test-host:
         ----------
 
@@ -58,13 +92,28 @@ will result in the following pillar tree for minion with ID "test-host"::
         file2.txt:
             Contents of file #2.
 
-To fill pillar data for minion in a node group, file_tree recursively
-iterates over ``root_dir``/nodegroups/``nodegroup`` (where ``nodegroup`` is a
-minion node group), and constructs the same directory tree with contents of all
-the files inside the pillar tree.
-**IMPORTANT**: The host data take precedence over the node group data
+.. note::
+    Subdirectories underneath ``root_dir``/hosts/``id`` become nested
+    dictionaries, as shown above.
+
+
+Assigning Pillar Data to Entire Nodegroups
+------------------------------------------
+
+To assign Pillar data to all minions in a given nodegroup, this external pillar
+recursively iterates over ``root_dir``/nodegroups/``nodegroup`` (where
+``nodegroup`` is the name of a nodegroup), and like for individual hosts,
+compiles pillar data with each subdirectory as a dictionary key and each file
+as a value.
+
+.. important::
+    If the same Pillar key is set for a minion both by nodegroup and by
+    individual host, then the value set for the individual host will take
+    precedence.
 
-For example, the following ``root_dir`` tree::
+For example, the following ``root_dir`` tree:
 
 .. code-block:: text
 
     ./nodegroups/
     ./nodegroups/test-group/
 
@@ -75,8 +124,10 @@ For example, the following ``root_dir`` tree::
     ./nodegroups/test-group/files/another-testdir/
     ./nodegroups/test-group/files/another-testdir/symlink-to-file1.txt
 
-will result in the following pillar tree for minion in the node group
-"test-group"::
+will result in the following pillar data for minions in the node group
+``test-group``:
+
+.. code-block:: text
 
     test-host:
         ----------
 
@@ -98,15 +149,14 @@ will result in the following pillar tree for minion in the node group
 from __future__ import absolute_import
 
 # Import python libs
+import fnmatch
 import logging
 import os
 import os.path
-from copy import deepcopy
 
 # Import salt libs
 import salt.utils
+import salt.utils.dictupdate
 import salt.utils.minions
-import salt.ext.six as six
 
 # Set up logging
 log = logging.getLogger(__name__)
 
@@ -116,30 +166,31 @@ def _on_walk_error(err):
     '''
     Log os.walk() error.
     '''
-    log.error('"%s": %s', err.filename, err.strerror)
+    log.error('%s: %s', err.filename, err.strerror)
 
 
-# Thanks to Ross McFarland for the dict_merge function
-# (Source: https://www.xormedia.com/recursively-merge-dictionaries-in-python/)
-def _dict_merge(dict_a, dict_b):
+def _check_newline(prefix, file_name, keep_newline):
     '''
-    recursively merges dict's. not just simple dict_a['key'] = dict_b['key'],
-    if both dict_a and dict_b have a key who's value is a dict then
-    _dict_merge is called on both values and the result stored in the returned
-    dictionary.
+    Return a boolean stating whether or not a file's trailing newline should be
+    removed. To figure this out, first check if keep_newline is a boolean and
+    if so, return its opposite. Otherwise, iterate over keep_newline and check
+    if any of the patterns match the file path. If a match is found, return
+    False, otherwise return True.
     '''
-    if not isinstance(dict_b, dict):
-        return dict_b
-    result = deepcopy(dict_a)
-    for key, value in six.iteritems(dict_b):
-        if key in result and isinstance(result[key], dict):
-            result[key] = _dict_merge(result[key], value)
-        else:
-            result[key] = deepcopy(value)
-    return result
+    if isinstance(keep_newline, bool):
+        return not keep_newline
+    full_path = os.path.join(prefix, file_name)
+    for pattern in keep_newline:
+        try:
+            if fnmatch.fnmatch(full_path, pattern):
+                return False
+        except TypeError:
+            if fnmatch.fnmatch(full_path, str(pattern)):
+                return False
+    return True
 
 
-def _construct_pillar(top_dir, follow_dir_links, raw_data=False):
+def _construct_pillar(top_dir, follow_dir_links, keep_newline=False):
     '''
     Construct pillar from file tree.
     '''
 
@@ -149,11 +200,11 @@ def _construct_pillar(top_dir, follow_dir_links, raw_data=False):
     for dir_path, dir_names, file_names in os.walk(
             top_dir, topdown=True, onerror=_on_walk_error,
             followlinks=follow_dir_links):
-        # Find current path in pillar tree.
+        # Find current path in pillar tree
         pillar_node = pillar
         norm_dir_path = os.path.normpath(dir_path)
         if norm_dir_path != norm_top_dir:
-            rel_path = os.path.relpath(norm_dir_path, norm_top_dir)
+            prefix = rel_path = os.path.relpath(norm_dir_path, norm_top_dir)
             path_parts = []
             while rel_path:
                 rel_path, tail = os.path.split(rel_path)
 
@@ -161,42 +212,77 @@ def _construct_pillar(top_dir, follow_dir_links, raw_data=False):
             while path_parts:
                 pillar_node = pillar_node[path_parts.pop(0)]
 
-        # Create dicts for subdirectories.
+        # Create dicts for subdirectories
         for dir_name in dir_names:
             pillar_node[dir_name] = {}
 
-        # Add files.
+        # Add files
         for file_name in file_names:
             file_path = os.path.join(dir_path, file_name)
             if not os.path.isfile(file_path):
-                log.error('"%s": Not a regular file', file_path)
+                log.error('file_tree: %s: not a regular file', file_path)
                 continue
 
+            contents = ''
             try:
                 with salt.utils.fopen(file_path, 'rb') as fhr:
-                    pillar_node[file_name] = fhr.read()
-                    if raw_data is False and pillar_node[file_name].endswith('\n'):
-                        pillar_node[file_name] = pillar_node[file_name][:-1]
-            except IOError as err:
-                log.error('%s', str(err))
+                    buf = fhr.read(__opts__['file_buffer_size'])
+                    while buf:
+                        contents += buf
+                        buf = fhr.read(__opts__['file_buffer_size'])
+                    if contents.endswith('\n') \
+                            and _check_newline(prefix,
+                                               file_name,
+                                               keep_newline):
+                        contents = contents[:-1]
+            except (IOError, OSError) as exc:
+                log.error('file_tree: Error reading %s: %s',
+                          file_path,
+                          exc.strerror)
+            else:
+                pillar_node[file_name] = contents
 
     return pillar
 
 
-def ext_pillar(
-        minion_id, pillar, root_dir=None,
-        follow_dir_links=False, debug=False, raw_data=False):
+def ext_pillar(minion_id,
+               pillar,
+               root_dir=None,
+               follow_dir_links=False,
+               debug=False,
+               raw_data=None,
+               keep_newline=False):
     '''
-    Find pillar data for specified ID.
+    Compile pillar data for the specified minion ID
     '''
-    # Not used.
+    # Not used
     del pillar
 
+    if raw_data is not None:
+        salt.utils.warn_until(
+            'Nitrogen',
+            'The \'raw_data\' argument for the file_tree ext_pillar has been '
+            'deprecated, please use \'keep_newline\' instead'
+        )
+        keep_newline = raw_data
+
     if not root_dir:
-        log.error('No root_dir specified for file_tree pillar')
+        log.error('file_tree: no root_dir specified')
         return {}
 
     if not os.path.isdir(root_dir):
-        log.error('"%s" does not exist or not a directory', root_dir)
+        log.error(
+            'file_tree: root_dir %s does not exist or is not a directory',
+            root_dir
+        )
         return {}
 
+    if not isinstance(keep_newline, (bool, list)):
+        log.error(
+            'file_tree: keep_newline must be either True/False or a list '
+            'of file globs. Skipping this ext_pillar for root_dir %s',
+            root_dir
+        )
+        return {}
+
     ngroup_pillar = {}
 
@@ -216,24 +302,30 @@ def ext_pillar(
                 ngroup_dir = os.path.join(
                     nodegroups_dir, str(nodegroup))
                 ngroup_pillar.update(
-                    _construct_pillar(ngroup_dir, follow_dir_links))
+                    _construct_pillar(ngroup_dir,
+                                      follow_dir_links,
+                                      keep_newline)
+                )
         else:
             if debug is True:
-                log.debug('File tree - No nodegroups found in file tree \
-directory ext_pillar_dirs, skipping...')
+                log.debug(
+                    'file_tree: no nodegroups found in file tree directory '
+                    'ext_pillar_dirs, skipping...'
+                )
     else:
         if debug is True:
-            log.debug('File tree - No nodegroups found in master \
-configuration, skipping nodegroups pillar function...')
+            log.debug('file_tree: no nodegroups found in master configuration')
 
     host_dir = os.path.join(root_dir, 'hosts', minion_id)
     if not os.path.exists(host_dir):
-        # No data for host with this ID.
+        # No data for host with this ID
        return ngroup_pillar
 
     if not os.path.isdir(host_dir):
-        log.error('"%s" exists, but not a directory', host_dir)
+        log.error('file_tree: %s exists, but is not a directory', host_dir)
         return ngroup_pillar
 
-    host_pillar = _construct_pillar(host_dir, follow_dir_links, raw_data)
-    return _dict_merge(ngroup_pillar, host_pillar)
+    host_pillar = _construct_pillar(host_dir, follow_dir_links, keep_newline)
+    return salt.utils.dictupdate.merge(ngroup_pillar,
+                                       host_pillar,
+                                       strategy='recurse')
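The glob handling in ``_check_newline`` above can be tried in isolation; a condensed, runnable sketch (the paths are made up):

.. code-block:: python

    import fnmatch
    import os

    def strip_newline(prefix, file_name, keep_newline):
        # A bool means keep-all or strip-all; a list is matched per file path.
        if isinstance(keep_newline, bool):
            return not keep_newline
        full_path = os.path.join(prefix, file_name)
        return not any(fnmatch.fnmatch(full_path, str(p)) for p in keep_newline)

    print(strip_newline('files/testdir', 'foo.txt', ['files/testdir/*']))  # False: newline kept
    print(strip_newline('files/other', 'foo.txt', ['files/testdir/*']))    # True: newline stripped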
@@ -251,9 +251,9 @@ States
 ------
 
 Associated states are thoroughly documented in
-:doc:`salt.states.vsphere </ref/states/all/salt.states.vsphere>`. Look there
-to find an example structure for Pillar as well as an example ``.sls`` file f
-or standing up an ESXi host from scratch.
+:doc:`salt.states.esxi </ref/states/all/salt.states.esxi>`. Look there
+to find an example structure for Pillar as well as an example ``.sls`` file
+for standing up an ESXi host from scratch.
 
 '''
 
@@ -347,7 +347,7 @@ def ping():
     Check to see if the host is responding. Returns False if the host didn't
     respond, True otherwise.

-    CLI Example::
+    CLI Example:

     .. code-block:: bash
@@ -125,18 +125,23 @@ def destroy(instances):
     return info


-def action(
-        func=None,
-        cloudmap=None,
-        instances=None,
-        provider=None,
-        instance=None,
-        **kwargs):
+def action(*args, **kwargs):
     '''
     Execute a single action on the given map/provider/instance

     CLI Example:

     .. code-block:: bash

         salt-run cloud.action start my-salt-vm
     '''
     client = _get_client()
-    info = client.action(func, cloudmap, instances, provider, instance, kwargs)
+    info = client.action(args[0],
+                         kwargs.get('cloudmap', None),
+                         args[1:],
+                         kwargs.get('provider', None),
+                         kwargs.get('instance', None),
+                         kwargs)
     return info
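Rewriting `action()` to take `*args, **kwargs` lets the runner treat the first positional argument as the action name and every remaining positional as an instance name, while map/provider options arrive as keywords. A toy sketch of that dispatch (the `FakeClient` class here is a stand-in, not Salt's `CloudClient`):

.. code-block:: python

    class FakeClient(object):
        def action(self, func, cloudmap, instances, provider, instance, kwargs):
            return {'func': func, 'instances': list(instances),
                    'provider': provider, 'instance': instance}


    def action(*args, **kwargs):
        client = FakeClient()
        # args[0] is the action name; the remaining positionals are instances.
        return client.action(args[0],
                             kwargs.get('cloudmap'),
                             args[1:],
                             kwargs.get('provider'),
                             kwargs.get('instance'),
                             kwargs)


    # Mirrors: salt-run cloud.action start my-salt-vm
    print(action('start', 'my-salt-vm'))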
@@ -46,7 +46,7 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None):
        jboss_module_downloaded:
          artifactory.downloaded:
           - artifact:
-              artifactory_url: http://artifactory.intranet.company.com/artifactory
+              artifactory_url: http://artifactory.intranet.example.com/artifactory
               repository: 'libs-release-local'
               artifact_id: 'module'
               group_id: 'com.company.module'

@@ -62,7 +62,7 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None):
        jboss_module_downloaded:
          artifactory.downloaded:
           - artifact:
-              artifactory_url: http://artifactory.intranet.company.com/artifactory
+              artifactory_url: http://artifactory.intranet.example.com/artifactory
               repository: 'libs-release-local'
               artifact_id: 'module'
               group_id: 'com.company.module'
@@ -76,6 +76,15 @@ with the role. This is the default behavior of the AWS console.
 If ``delete_policies: False`` is specified, existing policies that are not in
 the given list of policies will not be deleted. This allows manual modifications
 on the IAM role to be persistent. This functionality was added in 2015.8.0.

+.. note::
+
+    When using the ``profile`` parameter and ``region`` is set outside of
+    the profile group, region is ignored and a default region will be used.
+
+    If ``region`` is missing from the ``profile`` data set, ``us-east-1``
+    will be used as the default region.
+
 '''
 from __future__ import absolute_import
 import salt.utils.dictupdate as dictupdate
@@ -81,6 +81,15 @@ passed in as a dict, or as a string to pull from pillars or minion config:
             keyid: GKTADJGHEIQSXMKKRBJ08H
             key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
             region: us-east-1

+.. note::
+
+    When using the ``profile`` parameter and ``region`` is set outside of
+    the profile group, region is ignored and a default region will be used.
+
+    If ``region`` is missing from the ``profile`` data set, ``us-east-1``
+    will be used as the default region.
+
 '''
 from __future__ import absolute_import
@@ -8,7 +8,7 @@ Configure Chronos jobs via a salt proxy.
        - config:
            schedule: "R//PT2S"
            command: "echo 'hi'"
-           owner: "me@mycompany.com"
+           owner: "me@example.com"

 .. versionadded:: 2015.8.2
 '''
@@ -260,6 +260,7 @@ from salt.ext.six.moves import zip_longest
 log = logging.getLogger(__name__)

 COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?'
+__NOT_FOUND = object()


 def _get_accumulator_filepath():
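The new `__NOT_FOUND = object()` sentinel exists because values like `''`, `0`, or `False` are legitimate pillar data; only an identity comparison against a unique object can distinguish "key missing" from "key present but falsey". A minimal illustration with a plain dict standing in for `pillar.get`:

.. code-block:: python

    _NOT_FOUND = object()  # unique sentinel; nothing else is this object

    pillar = {'secret': '', 'retries': 0}


    def lookup(key):
        value = pillar.get(key, _NOT_FOUND)
        if value is _NOT_FOUND:
            return 'error: pillar {0} does not exist'.format(key)
        return repr(value)  # empty string and 0 are valid values


    print(lookup('secret'))   # "''" -- present, just empty
    print(lookup('missing'))  # error: pillar missing does not exist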
@@ -970,7 +971,7 @@ def absent(name):
     '''
     Make sure that the named file or directory is absent. If it exists, it will
     be deleted. This will work to reverse any of the functions in the file
-    state module.
+    state module. If a directory is supplied, it will be recursively deleted.

     name
         The path which should be deleted
@@ -1093,6 +1094,8 @@ def managed(name,
             contents_pillar=None,
             contents_grains=None,
             contents_newline=True,
+            contents_delimiter=':',
+            allow_empty=True,
             follow_symlinks=True,
             check_cmd=None,
             **kwargs):
@@ -1227,21 +1230,20 @@ def managed(name,
         used to render the downloaded file, currently jinja, mako, and wempy
         are supported

-    makedirs
-        If the file is located in a path without a parent directory, then
-        the state will fail. If makedirs is set to True, then the parent
-        directories will be created to facilitate the creation of the named
-        file.
+    makedirs : False
+        If set to ``True``, then the parent directories will be created to
+        facilitate the creation of the named file. If ``False``, and the parent
+        directory of the destination file doesn't exist, the state will fail.

     dir_mode
         If directories are to be created, passing this option specifies the
         permissions for those directories. If this is not set, directories
         will be assigned permissions from the 'mode' argument.

-    replace
-        If this file should be replaced. If false, this command will
-        not overwrite file contents but will enforce permissions if the file
-        exists already. Default is True.
+    replace : True
+        If set to ``False`` and the file already exists, the file will not be
+        modified even if changes would otherwise be made. Permissions and
+        ownership will still be enforced, however.

     context
         Overrides default context variables passed to the template.
@@ -1255,18 +1257,40 @@ def managed(name,
     show_diff
         DEPRECATED: Please use show_changes.

        If set to ``False``, the diff will not be shown in the return data if
        changes are made.

     show_changes
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

-    create
-        Default is True, if create is set to False then the file will only be
-        managed if the file already exists on the system.
+    create : True
+        If set to ``False``, then the file will only be managed if the file
+        already exists on the system.

     contents
-        Default is None. If specified, will use the given string as the
-        contents of the file. Should not be used in conjunction with a source
-        file of any kind. Ignores hashes and does not use a templating engine.
+        Specify the contents of the file. Cannot be used in combination with
+        ``source``. Ignores hashes and does not use a templating engine.
+
+        This value can be either a single string, a multiline YAML string or a
+        list of strings. If a list of strings, then the strings will be joined
+        together with newlines in the resulting file. For example, the below
+        two example states would result in identical file contents:
+
+        .. code-block:: yaml
+
+            /path/to/file1:
+              file.managed:
+                - contents:
+                  - This is line 1
+                  - This is line 2
+
+            /path/to/file2:
+              file.managed:
+                - contents: |
+                    This is line 1
+                    This is line 2


     contents_pillar
         .. versionadded:: 0.17.0
@@ -1312,18 +1336,40 @@ def managed(name,
            pipe character, and the multiline string is indented two more
            spaces.

+           To avoid the hassle of creating an indented multiline YAML string,
+           the :mod:`file_tree external pillar <salt.pillar.file_tree>` can
+           be used instead. However, this will not work for binary files in
+           Salt releases before 2015.8.4.

     contents_grains
         .. versionadded:: 2014.7.0

-        Same as contents_pillar, but with grains
+        Same as ``contents_pillar``, but with grains

-    contents_newline
+    contents_newline : True
         .. versionadded:: 2014.7.0
+        .. versionchanged:: 2015.8.4
+            This option is now ignored if the contents being deployed contain
+            binary data.

-        When using contents, contents_pillar, or contents_grains, this option
-        ensures the file will have a newline at the end.
-        When loading some data this newline is better left off. Setting
-        contents_newline to False will omit this final newline.
+        If ``True``, files managed using ``contents``, ``contents_pillar``, or
+        ``contents_grains`` will have a newline added to the end of the file if
+        one is not present. Setting this option to ``False`` will omit this
+        final newline.

+    contents_delimiter
+        .. versionadded:: 2015.8.4
+
+        Can be used to specify an alternate delimiter for ``contents_pillar``
+        or ``contents_grains``. This delimiter will be passed through to
+        :py:func:`pillar.get <salt.modules.pillar.get>` or :py:func:`grains.get
+        <salt.modules.grains.get>` when retrieving the contents.
+
+    allow_empty : True
+        .. versionadded:: 2015.8.4
+
+        If set to ``False``, then the state will fail if the contents specified
+        by ``contents_pillar`` or ``contents_grains`` are empty.
+
     follow_symlinks : True
         .. versionadded:: 2014.7.0
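`contents_delimiter` exists because pillar keys themselves may contain colons; the option is passed straight through to the nested lookup. A sketch of the delimiter-based traversal that `pillar.get`-style functions perform (simplified, without default handling):

.. code-block:: python

    def traverse(data, key, delimiter=':'):
        """Walk nested dicts following delimiter-separated path segments."""
        ptr = data
        for segment in key.split(delimiter):
            ptr = ptr[segment]  # raises KeyError if the path is absent
        return ptr


    pillar = {'secret_files': {'my:odd:name': 'hunter2'}}
    # A colon-laden leaf key forces an alternate delimiter:
    print(traverse(pillar, 'secret_files|my:odd:name', delimiter='|'))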
@@ -1351,42 +1397,97 @@ def managed(name,
        'name': name,
        'result': True}

-    # If no source is specified, set replace to False, as there is nothing
-    # to replace the file with.
-    src_defined = source or contents is not None or contents_pillar or contents_grains
-    if not src_defined and replace:
-        replace = False
-        log.warning(
-            'Neither \'source\' nor \'contents\' nor \'contents_pillar\' nor \'contents_grains\' '
-            'was defined, yet \'replace\' was set to \'True\'. As there is '
-            'no source to replace the file with, \'replace\' has been set '
-            'to \'False\' to avoid reading the file unnecessarily'
-        )
-
-    if len([_f for _f in [contents, contents_pillar, contents_grains] if _f]) > 1:
-        return _error(
-            ret, 'Only one of contents, contents_pillar, and contents_grains is permitted')
+    contents_count = len(
+        [x for x in (contents, contents_pillar, contents_grains) if x]
+    )
+
+    if source and contents_count > 0:
+        return _error(
+            ret,
+            '\'source\' cannot be used in combination with \'contents\', '
+            '\'contents_pillar\', or \'contents_grains\''
+        )
+    elif contents_count > 1:
+        return _error(
+            ret,
+            'Only one of \'contents\', \'contents_pillar\', and '
+            '\'contents_grains\' is permitted'
+        )
+
+    # If no source is specified, set replace to False, as there is nothing
+    # with which to replace the file.
+    if not source and contents_count == 0 and replace:
+        replace = False
+        log.warning(
+            'Neither \'source\' nor \'contents\' nor \'contents_pillar\' nor '
+            '\'contents_grains\' was defined, yet \'replace\' was set to '
+            '\'True\'. As there is no source to replace the file with, '
+            '\'replace\' has been set to \'False\' to avoid reading the file '
+            'unnecessarily.'
+        )

+    # Use this below to avoid multiple '\0' checks and save some CPU cycles
+    contents_are_binary = False
     if contents_pillar:
-        contents = __salt__['pillar.get'](contents_pillar)
-        if not contents:
-            return _error(ret, 'contents_pillar {0} results in empty contents'.format(contents_pillar))
-    if contents_grains:
-        contents = __salt__['grains.get'](contents_grains)
-        if not contents:
-            return _error(ret, 'contents_grain {0} results in empty contents'.format(contents_grains))
+        contents = __salt__['pillar.get'](contents_pillar, __NOT_FOUND)
+        if contents is __NOT_FOUND:
+            return _error(
+                ret,
+                'Pillar {0} does not exist'.format(contents_pillar)
+            )
+        try:
+            if '\0' in contents:
+                contents_are_binary = True
+        except TypeError:
+            contents = str(contents)
+        if not allow_empty and not contents:
+            return _error(
+                ret,
+                'contents_pillar {0} results in empty contents'
+                .format(contents_pillar)
+            )

-    # ensure contents is a string
-    if contents:
-        validated_contents = _validate_str_list(contents)
-        if not validated_contents:
-            return _error(ret, '"contents" is not a string or list of strings')
-        if isinstance(validated_contents, list):
-            contents = os.linesep.join(validated_contents)
-        if contents_newline:
-            # Make sure file ends in newline
-            if contents and not contents.endswith(os.linesep):
-                contents += os.linesep
+    elif contents_grains:
+        contents = __salt__['grains.get'](contents_grains, __NOT_FOUND)
+        if contents is __NOT_FOUND:
+            return _error(
+                ret,
+                'Grain {0} does not exist'.format(contents_grains)
+            )
+        try:
+            if '\0' in contents:
+                contents_are_binary = True
+        except TypeError:
+            contents = str(contents)
+        if not allow_empty and not contents:
+            return _error(
+                ret,
+                'contents_grains {0} results in empty contents'
+                .format(contents_grains)
+            )
+
+    elif contents:
+        try:
+            if '\0' in contents:
+                contents_are_binary = True
+        except TypeError:
+            pass
+        if not contents_are_binary:
+            validated_contents = _validate_str_list(contents)
+            if not validated_contents:
+                return _error(
+                    ret,
+                    '\'contents\' is not a string or list of strings'
+                )
+            contents = os.linesep.join(validated_contents)
+
+    # If either contents_pillar or contents_grains were used, the contents
+    # variable now contains the value loaded from pillar/grains.
+    if contents \
+            and not contents_are_binary \
+            and contents_newline \
+            and not contents.endswith(os.linesep):
+        contents += os.linesep

     # Make sure that leading zeros stripped by YAML loader are added back
     mode = __salt__['config.manage_mode'](mode)
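The rewritten block above boils down to: detect binary payloads with a NUL-byte check, validate and join textual lists, and only then consider appending a trailing newline. A condensed standalone sketch of that flow, under the assumption that list validation reduces to the common cases (the real `_validate_str_list` is stricter):

.. code-block:: python

    import os


    def normalize_contents(contents, contents_newline=True):
        contents_are_binary = False
        try:
            if '\0' in contents:
                contents_are_binary = True  # skip all text munging
        except TypeError:
            contents = str(contents)  # e.g. an int loaded from pillar

        if not contents_are_binary:
            if isinstance(contents, list):
                # joined with the platform line separator
                contents = os.linesep.join(contents)
            if contents_newline and contents \
                    and not contents.endswith(os.linesep):
                contents += os.linesep  # ensure a single trailing newline
        return contents


    print(repr(normalize_contents(['line 1', 'line 2'])))
    print(repr(normalize_contents('\x89PNG\0...')))  # binary: untouched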
@@ -1538,7 +1639,12 @@ def managed(name,
         try:
             __salt__['file.copy'](name, tmp_filename)
         except Exception as exc:
-            return _error(ret, 'Unable to copy file {0} to {1}: {2}'.format(name, tmp_filename, exc))
+            return _error(
+                ret,
+                'Unable to copy file {0} to {1}: {2}'.format(
+                    name, tmp_filename, exc
+                )
+            )

         try:
             ret = __salt__['file.manage_file'](
@@ -2480,14 +2586,15 @@ def line(name, content, match=None, mode=None, location=None,
     if not check_res:
         return _error(ret, check_msg)

-    changes = __salt__['file.line'](name, content, match=match, mode=mode, location=location,
-                                    before=before, after=after, show_changes=show_changes,
-                                    backup=backup, quiet=quiet, indent=indent)
+    changes = __salt__['file.line'](
+        name, content, match=match, mode=mode, location=location,
+        before=before, after=after, show_changes=show_changes,
+        backup=backup, quiet=quiet, indent=indent)
     if changes:
         ret['pchanges']['diff'] = changes
         if __opts__['test']:
             ret['result'] = None
-            ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
+            ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
         else:
             ret['result'] = True
             ret['comment'] = 'Changes were made'
@@ -2519,10 +2626,11 @@ def replace(name,
         Filesystem path to the file to be edited.

     pattern
-        Python's `regular expression search <https://docs.python.org/2/library/re.html>`_.
+        A regular expression, to be matched using Python's
+        :py:func:`~re.search`.

     repl
-        The replacement text.
+        The replacement text

     count
         Maximum number of pattern occurrences to be replaced. Defaults to 0.

@@ -2543,36 +2651,43 @@ def replace(name,
         specified which will read the entire file into memory before
         processing.

-    append_if_not_found
-        If pattern is not found and set to ``True`` then, the content will be appended to the file.
+    append_if_not_found : False
+        If set to ``True``, and pattern is not found, then the content will be
+        appended to the file.

         .. versionadded:: 2014.7.0

-    prepend_if_not_found
-        If pattern is not found and set to ``True`` then, the content will be prepended to the file.
+    prepend_if_not_found : False
+        If set to ``True`` and pattern is not found, then the content will be
+        prepended to the file.

         .. versionadded:: 2014.7.0

     not_found_content
-        Content to use for append/prepend if not found. If ``None`` (default), uses ``repl``. Useful
-        when ``repl`` uses references to group in pattern.
+        Content to use for append/prepend if not found. If ``None`` (default),
+        uses ``repl``. Useful when ``repl`` uses references to group in
+        pattern.

         .. versionadded:: 2014.7.0

     backup
-        The file extension to use for a backup of the file before editing. Set to ``False`` to skip
-        making a backup.
+        The file extension to use for a backup of the file before editing. Set
+        to ``False`` to skip making a backup.

-    show_changes
-        Output a unified diff of the old file and the new file. If ``False`` return a boolean if any
-        changes were made. Returns a boolean or a string.
+    show_changes : True
+        Output a unified diff of the old file and the new file. If ``False``
+        return a boolean if any changes were made. Returns a boolean or a
+        string.

         .. note::
-            Using this option will store two copies of the file in-memory (the original version and
-            the edited version) in order to generate the diff.
+            Using this option will store two copies of the file in memory (the
+            original version and the edited version) in order to generate the
+            diff. This may not normally be a concern, but could impact
+            performance if used with large files.

-    For complex regex patterns it can be useful to avoid the need for complex quoting and escape
-    sequences by making use of YAML's multiline string syntax.
+    For complex regex patterns, it can be useful to avoid the need for complex
+    quoting and escape sequences by making use of YAML's multiline string
+    syntax.

     .. code-block:: yaml
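Putting `append_if_not_found` and `not_found_content` together: the module searches for the pattern, substitutes on a hit, and otherwise appends the fallback text. A compact in-memory sketch of that decision (the real `file.replace` streams the file and additionally handles backups, counts, and prepending):

.. code-block:: python

    import re


    def replace_sketch(text, pattern, repl,
                       append_if_not_found=False, not_found_content=None):
        new_text, hits = re.subn(pattern, repl, text, flags=re.MULTILINE)
        if hits == 0 and append_if_not_found:
            # Fall back to repl when no dedicated not-found content is given.
            addition = not_found_content if not_found_content is not None else repl
            new_text = text.rstrip('\n') + '\n' + addition + '\n'
        return new_text


    conf = "Listen 80\n"
    print(replace_sketch(conf, r'^Listen .*', 'Listen 8080'))
    print(replace_sketch(conf, r'^ServerName .*', 'ServerName web01',
                         append_if_not_found=True))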
@@ -3134,7 +3249,8 @@ def append(name,
            sources=None,
            source_hashes=None,
            defaults=None,
-           context=None):
+           context=None,
+           ignore_whitespace=True):
     '''
     Ensure that some text appears at the end of a file.
@@ -3237,6 +3353,13 @@ def append(name,
     context
         Overrides default context variables passed to the template.

+    ignore_whitespace
+        .. versionadded:: 2015.8.4
+
+        Spaces and tabs in the text are ignored by default when searching for
+        the content to append; one space and multiple tabs are treated the
+        same by Salt. Set this option to ``False`` to change this behavior.
+
     Multi-line example:

     .. code-block:: yaml
|
|||
|
||||
try:
|
||||
for chunk in text:
|
||||
|
||||
if __salt__['file.search'](
|
||||
if ignore_whitespace:
|
||||
if __salt__['file.search'](
|
||||
name,
|
||||
salt.utils.build_whitespace_split_regex(chunk),
|
||||
multiline=True):
|
||||
continue
|
||||
elif __salt__['file.search'](
|
||||
name,
|
||||
chunk,
|
||||
multiline=True):
|
||||
continue
|
||||
|
||||
lines = chunk.splitlines()
|
||||
|
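`ignore_whitespace` relies on turning each chunk into a whitespace-tolerant regex, which is what `salt.utils.build_whitespace_split_regex` does: split on whitespace, escape each token, and rejoin so any run of spaces or tabs matches. A rough approximation of the idea (the real helper differs in details):

.. code-block:: python

    import re


    def whitespace_split_regex(text):
        # One space, several spaces, or tabs between tokens all match \s+.
        return r'(?m)^\s*' + r'\s+'.join(re.escape(tok) for tok in text.split())


    haystack = 'export  PATH=\t/usr/local/bin\n'
    pattern = whitespace_split_regex('export PATH= /usr/local/bin')
    print(bool(re.search(pattern, haystack)))  # True despite differing whitespace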
@@ -4300,7 +4428,7 @@ def serialize(name,

     formatter = kwargs.pop('formatter', 'yaml').lower()

-    if len([_f for _f in [dataset, dataset_pillar] if _f]) > 1:
+    if len([x for x in (dataset, dataset_pillar) if x]) > 1:
         return _error(
             ret, 'Only one of \'dataset\' and \'dataset_pillar\' is permitted')
@@ -25,7 +25,7 @@ Example of application deployment:
   application_deployed:
     jboss7.deployed:
      - artifact:
-         artifactory_url: http://artifactory.intranet.company.com/artifactory
+         artifactory_url: http://artifactory.intranet.example.com/artifactory
          repository: 'ext-release-local'
          artifact_id: 'webcomponent'
          group_id: 'com.company.application'

@@ -62,7 +62,7 @@ Configuration in pillars:
   .. code-block:: yaml

     artifactory:
-       url: 'http://artifactory.intranet.company.com/artifactory'
+       url: 'http://artifactory.intranet.example.com/artifactory'
        repository: 'libs-snapshots-local'

     webcomponent-artifact:

@@ -317,7 +317,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):
        Dict with connection properties (see state description)
     artifact:
        If set, the artifact will be fetched from artifactory. This is a Dict object with the following properties:
-          - artifactory_url: Full url to artifactory instance, for example: http://artifactory.intranet.company.com/artifactory
+          - artifactory_url: Full url to artifactory instance, for example: http://artifactory.intranet.example.com/artifactory
           - repository: One of the repositories, for example: libs-snapshots, ext-release-local, etc..
           - artifact_id: Artifact ID of the artifact
           - group_id: Group ID of the artifact

@@ -366,7 +366,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):
       application_deployed:
         jboss7.deployed:
          - artifact:
-             artifactory_url: http://artifactory.intranet.company.com/artifactory
+             artifactory_url: http://artifactory.intranet.example.com/artifactory
              repository: 'ext-release-local'
              artifact_id: 'webcomponent'
              group_id: 'com.company.application'

@@ -377,7 +377,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):

    This performs the following operations:

-    * Download artifact from artifactory. In the example above the artifact will be fetched from: http://artifactory.intranet.company.com/artifactory/ext-release-local/com/company/application/webcomponent/0.1/webcomponent-0.1.war
+    * Download artifact from artifactory. In the example above the artifact will be fetched from: http://artifactory.intranet.example.com/artifactory/ext-release-local/com/company/application/webcomponent/0.1/webcomponent-0.1.war
      As a rule, for released versions the artifacts are downloaded from: artifactory_url/repository/group_id_with_slashes_instead_of_dots/artifact_id/version/artifact_id-version.packaging
      This follows artifactory convention for artifact resolution. By default the artifact will be downloaded to the /tmp directory on the minion.
    * Connect to JBoss via controller (defined in jboss_config dict) and check if the artifact is not deployed already. In case of artifactory
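The resolution convention described above (artifactory_url / repository / group_id with dots replaced by slashes / artifact_id / version / artifact_id-version.packaging) is mechanical enough to sketch; the unit tests later in this diff exercise the same URL shape. An illustrative helper, not the module's actual `_get_release_url` signature:

.. code-block:: python

    def release_url(artifactory_url, repository, group_id, artifact_id,
                    version, packaging):
        group_path = group_id.replace('.', '/')  # dots become path segments
        file_name = '{0}-{1}.{2}'.format(artifact_id, version, packaging)
        return '{0}/{1}/{2}/{3}/{4}/{5}'.format(
            artifactory_url, repository, group_path, artifact_id,
            version, file_name)


    print(release_url('http://artifactory.example.com/artifactory',
                      'ext-release-local', 'com.company.application',
                      'webcomponent', '0.1', 'war'))
    # .../ext-release-local/com/company/application/webcomponent/0.1/webcomponent-0.1.war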
@@ -391,7 +391,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):
       application_deployed:
         jboss7.deployed:
          - artifact:
-             artifactory_url: http://artifactory.intranet.company.com/artifactory
+             artifactory_url: http://artifactory.intranet.example.com/artifactory
              repository: 'ext-snapshot-local'
              artifact_id: 'webcomponent'
              group_id: 'com.company.application'

@@ -402,9 +402,9 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):
    Deploying snapshot version involves an additional step of resolving the exact version of the artifact (including the timestamp), which
    is not necessary when deploying a release.
    In the example above first a request will be made to retrieve the update timestamp from:
-    http://artifactory.intranet.company.com/artifactory/ext-snapshot-local/com/company/application/webcomponent/0.1-SNAPSHOT/maven-metadata.xml
+    http://artifactory.intranet.example.com/artifactory/ext-snapshot-local/com/company/application/webcomponent/0.1-SNAPSHOT/maven-metadata.xml
    Then the artifact will be fetched from
-    http://artifactory.intranet.company.com/artifactory/ext-snapshot-local/com/company/application/webcomponent/0.1-SNAPSHOT/webcomponent-RESOLVED_SNAPSHOT_VERSION.war
+    http://artifactory.intranet.example.com/artifactory/ext-snapshot-local/com/company/application/webcomponent/0.1-SNAPSHOT/webcomponent-RESOLVED_SNAPSHOT_VERSION.war

    .. note:: In order to perform a snapshot deployment you have to:


@@ -421,7 +421,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):
       application_deployed:
         jboss7.deployed:
          - artifact:
-             artifactory_url: http://artifactory.intranet.company.com/artifactory
+             artifactory_url: http://artifactory.intranet.example.com/artifactory
              repository: 'ext-snapshot-local'
              artifact_id: 'webcomponent'
              group_id: 'com.company.application'

@@ -432,7 +432,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):


    In this example the artifact will be retrieved from:
-    http://artifactory.intranet.company.com/artifactory/ext-snapshot-local/com/company/application/webcomponent/0.1-SNAPSHOT/webcomponent-0.1-20141023.131756-19.war
+    http://artifactory.intranet.example.com/artifactory/ext-snapshot-local/com/company/application/webcomponent/0.1-SNAPSHOT/webcomponent-0.1-20141023.131756-19.war

    4) Deployment of latest snapshot of artifact from Artifactory.


@@ -441,7 +441,7 @@ def deployed(name, jboss_config, artifact=None, salt_source=None):
       application_deployed:
         jboss7.deployed:
          - artifact:
-             artifactory_url: http://artifactory.intranet.company.com/artifactory
+             artifactory_url: http://artifactory.intranet.example.com/artifactory
              repository: 'ext-snapshot-local'
              artifact_id: 'webcomponent'
              group_id: 'com.company.application'
@@ -88,13 +88,11 @@ def _get_profile(service, region, key, keyid, profile):
         key = _profile.get('key', None)
         keyid = _profile.get('keyid', None)
         region = _profile.get('region', None)

     if not region and __salt__['config.option'](service + '.region'):
         region = __salt__['config.option'](service + '.region')

     if not region:
         region = 'us-east-1'

     if not key and __salt__['config.option'](service + '.key'):
         key = __salt__['config.option'](service + '.key')
     if not keyid and __salt__['config.option'](service + '.keyid'):
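The lookup order implied here, and by the doc notes added earlier in this diff, is: profile dict first, then the service-scoped config option, then the `us-east-1` fallback. A distilled sketch of that chain, where `config_option` stands in for `__salt__['config.option']`:

.. code-block:: python

    def resolve_region(service, region=None, profile=None, config_option=None):
        """Mirror the fallback chain: profile -> config option -> us-east-1."""
        config_option = config_option or (lambda key: None)
        if profile:
            # Values inside the profile group take precedence, per the doc note.
            region = profile.get('region')
        if not region and config_option(service + '.region'):
            region = config_option(service + '.region')
        if not region:
            region = 'us-east-1'
        return region


    print(resolve_region('ec2'))                                   # us-east-1
    print(resolve_region('ec2', profile={'region': 'eu-west-1'}))  # eu-west-1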
@@ -247,7 +245,7 @@ def exactly_one(l):
     return exactly_n(l)


-def assign_funcs(modname, service, module=None):
+def assign_funcs(modname, service, module=None, pack=None):
     '''
     Assign _get_conn and _cache_id functions to the named module.

@@ -255,6 +253,9 @@ def assign_funcs(modname, service, module=None):

        __utils__['boto.assign_partials'](__name__, 'ec2')
     '''
+    if pack:
+        global __salt__  # pylint: disable=W0601
+        __salt__ = pack
     mod = sys.modules[modname]
     setattr(mod, '_get_conn', get_connection_func(service, module=module))
     setattr(mod, '_cache_id', cache_id_func(service))
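`assign_funcs` works by mutating the importing module at load time: it looks the caller up in `sys.modules` and `setattr`s pre-bound helpers onto it, and the new `pack` argument additionally rebinds the utility module's own `__salt__`. A toy version of the injection idea (the `boto_fake` module and `get_conn` helper are invented for the demonstration):

.. code-block:: python

    import sys
    import types
    from functools import partial


    def get_conn(service, module=None):
        return 'connection to {0}'.format(module or service)


    def assign_funcs(modname, service, module=None):
        mod = sys.modules[modname]
        # Bind a ready-made helper onto the caller, like _get_conn in the diff.
        setattr(mod, '_get_conn', partial(get_conn, service, module=module))


    # Simulate a consumer module registering itself.
    consumer = types.ModuleType('boto_fake')
    sys.modules['boto_fake'] = consumer
    assign_funcs('boto_fake', 'ec2')
    print(consumer._get_conn())  # connection to ec2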
@@ -1307,7 +1307,11 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
         all the empty directories within it in the "blobs" list
         '''
         for item in six.iteritems(tree):
-            obj = self.repo.get_object(item.sha)
+            try:
+                obj = self.repo.get_object(item.sha)
+            except KeyError:
+                # Entry is a submodule, skip it
+                continue
             if not isinstance(obj, dulwich.objects.Tree):
                 continue
             blobs.append(os.path.join(prefix, item.path))

@@ -1380,6 +1384,21 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
                     'Unable to remove {0}: {1}'.format(self.cachedir, exc)
                 )
                 return False
+        else:
+            # Dulwich does not write fetched references to the gitdir, that is
+            # done manually below (see the "Update local refs" comment). Since
+            # A) gitfs doesn't check out any local branches, B) both Pygit2 and
+            # GitPython set remote refs when fetching instead of head refs, and
+            # C) Dulwich is not supported for git_pillar or winrepo, there is
+            # no harm in simply renaming the head refs from the fetch results
+            # to remote refs. This allows the same logic (see the
+            # "_get_envs_from_ref_paths()" function) to be used for all three
+            # GitProvider subclasses to derive available envs.
+            for ref in [x for x in refs_post if x.startswith('refs/heads/')]:
+                val = refs_post.pop(ref)
+                key = ref.replace('refs/heads/', 'refs/remotes/origin/', 1)
+                refs_post[key] = val

         if refs_post is None:
             # Empty repository
             log.warning(
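The head-to-remote rename above is a pure key rewrite over the fetched refs mapping. Isolated, it looks like this (the SHAs are made up for the example):

.. code-block:: python

    refs_post = {
        'refs/heads/master': 'a1b2c3',
        'refs/heads/2015.8': 'd4e5f6',
        'refs/tags/v2015.8.3': '0f9e8d',
    }

    # Rename head refs to remote refs; tags pass through untouched.
    for ref in [x for x in refs_post if x.startswith('refs/heads/')]:
        val = refs_post.pop(ref)
        refs_post[ref.replace('refs/heads/', 'refs/remotes/origin/', 1)] = val

    print(sorted(refs_post))
    # ['refs/remotes/origin/2015.8', 'refs/remotes/origin/master',
    #  'refs/tags/v2015.8.3']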
@@ -1392,7 +1411,7 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
             for ref in self.get_env_refs(refs_post):
                 self.repo[ref] = refs_post[ref]
             # Prune stale refs
-            for ref in self.repo.get_refs():
+            for ref in refs_pre:
                 if ref not in refs_post:
                     del self.repo[ref]
             return True

@@ -1408,7 +1427,11 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
         all the file paths and symlinks info in the "blobs" dict
         '''
         for item in six.iteritems(tree):
-            obj = self.repo.get_object(item.sha)
+            try:
+                obj = self.repo.get_object(item.sha)
+            except KeyError:
+                # Entry is a submodule, skip it
+                continue
             if isinstance(obj, dulwich.objects.Blob):
                 repo_path = os.path.join(prefix, item.path)
                 blobs.setdefault('files', []).append(repo_path)

@@ -1502,10 +1525,19 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
         refs = self.repo.get_refs()
         # Sorting ensures we check heads (branches) before tags
         for ref in sorted(self.get_env_refs(refs)):
-            # ref will be something like 'refs/heads/master'
-            rtype, rspec = ref[5:].split('/', 1)
+            # ref will be something like 'refs/remotes/origin/master'
+            try:
+                rtype, rspec = re.split('^refs/(remotes/origin|tags)/',
+                                        ref,
+                                        1)[-2:]
+            except ValueError:
+                # No match was found for the split regex, we don't care about
+                # this ref. We shouldn't see any of these as the refs are being
+                # filtered through self.get_env_refs(), but just in case, this
+                # will avoid a traceback.
+                continue
             if rspec == tgt_ref and self.env_is_exposed(tgt_env):
-                if rtype == 'heads':
+                if rtype == 'remotes/origin':
                     commit = self.repo.get_object(refs[ref])
                 elif rtype == 'tags':
                     tag = self.repo.get_object(refs[ref])
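The `re.split` call above leans on a subtle behavior: a capturing group in the split pattern lands in the result list, so `[-2:]` yields the ref type and ref spec in one shot, and a non-matching ref yields too few elements, tripping the `ValueError` guard during unpacking. Demonstrated in isolation:

.. code-block:: python

    import re

    PAT = '^refs/(remotes/origin|tags)/'

    for ref in ('refs/remotes/origin/master', 'refs/tags/v2015.8.3', 'HEAD'):
        parts = re.split(PAT, ref, 1)[-2:]
        try:
            rtype, rspec = parts  # ValueError when the pattern didn't match
            print(ref, '->', rtype, rspec)
        except ValueError:
            print(ref, '-> skipped')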
@@ -504,31 +504,33 @@ class Schedule(object):
             func = None
         if func not in self.functions:
             log.info(
-                'Invalid function: {0} in job {1}. Ignoring.'.format(
+                'Invalid function: {0} in scheduled job {1}.'.format(
                     func, name
                 )
             )

-        if 'name' not in data:
-            data['name'] = name
-        log.info(
-            'Running Job: {0}.'.format(name)
-        )
-
-        multiprocessing_enabled = self.opts.get('multiprocessing', True)
-        if multiprocessing_enabled:
-            thread_cls = SignalHandlingMultiprocessingProcess
-        else:
-            thread_cls = threading.Thread
-
-        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
-        if multiprocessing_enabled:
-            with default_signals(signal.SIGINT, signal.SIGTERM):
-                # Reset current signals before starting the process in
-                # order not to inherit the current signal handlers
-                proc.start()
-        else:
-            proc.start()
-        if multiprocessing_enabled:
-            proc.join()
+        if 'name' not in data:
+            data['name'] = name
+        log.info(
+            'Running Job: {0}.'.format(name)
+        )
+        multiprocessing_enabled = self.opts.get('multiprocessing', True)
+        if multiprocessing_enabled:
+            thread_cls = SignalHandlingMultiprocessingProcess
+        else:
+            thread_cls = threading.Thread
+        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
+        if multiprocessing_enabled:
+            with default_signals(signal.SIGINT, signal.SIGTERM):
+                # Reset current signals before starting the process in
+                # order not to inherit the current signal handlers
+                proc.start()
+        else:
+            proc.start()
+        if multiprocessing_enabled:
+            proc.join()

     def enable_schedule(self):
         '''
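The dispatch above picks a `multiprocessing` process or a plain thread through the same `target=`/`args=` call shape, then joins only in the process case. A stripped-down sketch without Salt's signal handling, which is what `default_signals` and `SignalHandlingMultiprocessingProcess` add on top:

.. code-block:: python

    import multiprocessing
    import threading


    def handle_func(multiprocessing_enabled, name):
        print('running {0} (multiprocessing={1})'.format(
            name, multiprocessing_enabled))


    def run_job(name, multiprocessing_enabled=True):
        thread_cls = (multiprocessing.Process if multiprocessing_enabled
                      else threading.Thread)
        proc = thread_cls(target=handle_func,
                          args=(multiprocessing_enabled, name))
        proc.start()
        if multiprocessing_enabled:
            proc.join()  # processes are reaped; threads are left running


    if __name__ == '__main__':  # required for multiprocessing on some platforms
        run_job('highstate', multiprocessing_enabled=True)
        run_job('highstate', multiprocessing_enabled=False)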
@@ -671,41 +673,43 @@ class Schedule(object):
             except OSError:
                 log.info('Unable to remove file: {0}.'.format(fn_))

-        if multiprocessing_enabled and not salt.utils.is_windows():
-            # Shutdown the multiprocessing before daemonizing
-            log_setup.shutdown_multiprocessing_logging()
-
-        salt.utils.daemonize_if(self.opts)
-
-        if multiprocessing_enabled and not salt.utils.is_windows():
-            # Reconfigure multiprocessing logging after daemonizing
-            log_setup.setup_multiprocessing_logging()
-
-        ret['pid'] = os.getpid()
-
-        if 'jid_include' not in data or data['jid_include']:
-            log.debug('schedule.handle_func: adding this job to the jobcache '
-                      'with data {0}'.format(ret))
-            # write this to /var/cache/salt/minion/proc
-            with salt.utils.fopen(proc_fn, 'w+b') as fp_:
-                fp_.write(salt.payload.Serial(self.opts).dumps(ret))
-
-        args = tuple()
-        if 'args' in data:
-            args = data['args']
-
-        kwargs = {}
-        if 'kwargs' in data:
-            kwargs = data['kwargs']
-        # if the func support **kwargs, lets pack in the pub data we have
-        # TODO: pack the *same* pub data as a minion?
-        argspec = salt.utils.args.get_function_argspec(self.functions[func])
-        if argspec.keywords:
-            # this function accepts **kwargs, pack in the publish data
-            for key, val in six.iteritems(ret):
-                kwargs['__pub_{0}'.format(key)] = val
-
         try:
+            salt.utils.daemonize_if(self.opts)
+
+            ret['pid'] = os.getpid()
+
+            if 'jid_include' not in data or data['jid_include']:
+                log.debug('schedule.handle_func: adding this job to the jobcache '
+                          'with data {0}'.format(ret))
+                # write this to /var/cache/salt/minion/proc
+                with salt.utils.fopen(proc_fn, 'w+b') as fp_:
+                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
+
+            args = tuple()
+            if 'args' in data:
+                args = data['args']
+
+            kwargs = {}
+            if 'kwargs' in data:
+                kwargs = data['kwargs']
+
+            if func not in self.functions:
+                ret['return'] = self.functions.missing_fun_string(func)
+                salt.utils.error.raise_error(
+                    message=self.functions.missing_fun_string(func))
+
+            # if the func support **kwargs, lets pack in the pub data we have
+            # TODO: pack the *same* pub data as a minion?
+            argspec = salt.utils.args.get_function_argspec(self.functions[func])
+            if argspec.keywords:
+                # this function accepts **kwargs, pack in the publish data
+                for key, val in six.iteritems(ret):
+                    kwargs['__pub_{0}'.format(key)] = val

             ret['return'] = self.functions[func](*args, **kwargs)

             data_returner = data.get('returner', None)
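The `argspec.keywords` check above injects `__pub_*` metadata only into functions that declare `**kwargs`, so ordinary functions never receive unexpected keywords. The same idea with the stdlib `inspect` module; `get_function_argspec` is Salt's wrapper around it, which on the Python 2 codebase of this era exposed the `**kwargs` name as `.keywords` (modern `inspect` calls it `varkw`):

.. code-block:: python

    import inspect


    def pack_pub_data(func, kwargs, pub_data):
        spec = inspect.getfullargspec(func)
        if spec.varkw:  # the function declares **kwargs
            for key, val in pub_data.items():
                kwargs['__pub_{0}'.format(key)] = val
        return kwargs


    def strict(a):
        return a


    def greedy(a, **kwargs):
        return (a, kwargs)


    pub = {'jid': 'req', 'fun': 'test.ping'}
    print(pack_pub_data(strict, {}, pub))  # {} -- nothing injected
    print(pack_pub_data(greedy, {}, pub))  # {'__pub_jid': 'req', ...}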
@@ -733,31 +737,34 @@ class Schedule(object):
                         )
                     )

-            # Only attempt to return data to the master
-            # if the scheduled job is running on a minion.
-            if '__role' in self.opts and self.opts['__role'] == 'minion':
-                if 'return_job' in data and not data['return_job']:
-                    pass
-                else:
-                    # Send back to master so the job is included in the job list
-                    mret = ret.copy()
-                    mret['jid'] = 'req'
-                    channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
-                    load = {'cmd': '_return', 'id': self.opts['id']}
-                    for key, value in six.iteritems(mret):
-                        load[key] = value
-                    try:
-                        channel.send(load)
-                    except salt.exceptions.SaltReqTimeoutError:
-                        log.error('Timeout error during scheduled job: {0}. Salt master could not be reached.'.format(ret['fun']))
-
+            ret['retcode'] = self.functions.pack['__context__']['retcode']
+            ret['success'] = True
+        except Exception:
+            log.exception("Unhandled exception running {0}".format(ret['fun']))
+            # Although catch-all exception handlers are bad, the exception here
+            # is to let the exception bubble up to the top of the thread context,
+            # where the thread will die silently, which is worse.
+            if 'return' not in ret:
+                ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
+            ret['success'] = False
+            ret['retcode'] = 254
         finally:
             try:
+                # Only attempt to return data to the master
+                # if the scheduled job is running on a minion.
+                if '__role' in self.opts and self.opts['__role'] == 'minion':
+                    if 'return_job' in data and not data['return_job']:
+                        pass
+                    else:
+                        # Send back to master so the job is included in the job list
+                        mret = ret.copy()
+                        mret['jid'] = 'req'
+                        channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
+                        load = {'cmd': '_return', 'id': self.opts['id']}
+                        for key, value in six.iteritems(mret):
+                            load[key] = value
+                        channel.send(load)

                 log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
                 os.unlink(proc_fn)
             except OSError as exc:
@@ -803,11 +810,10 @@ class Schedule(object):
                 func = None
             if func not in self.functions:
                 log.info(
-                    'Invalid function: {0} in job {1}. Ignoring.'.format(
+                    'Invalid function: {0} in scheduled job {1}.'.format(
                         func, job
                     )
                 )
-                continue
             if 'name' not in data:
                 data['name'] = job
             # Add up how many seconds between now and then
@@ -181,7 +181,7 @@ def wrap_tmpl_func(render_str):
             output = os.linesep.join(output.splitlines())

     except SaltRenderError as exc:
-        log.error("Rendering exception occurred :{0}".format(exc))
+        log.error("Rendering exception occurred: {0}".format(exc))
         #return dict(result=False, data=str(exc))
         raise
     except Exception:
@@ -246,7 +246,7 @@ def get_inventory(service_instance):
     return service_instance.RetrieveContent()


-def get_content(service_instance, obj_type, property_list=None):
+def get_content(service_instance, obj_type, property_list=None, container_ref=None):
     '''
     Returns the content of the specified type of object for a Service Instance.

@@ -261,10 +261,19 @@ def get_content(service_instance, obj_type, property_list=None):

     property_list
         An optional list of object properties used to return even more filtered content results.

+    container_ref
+        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
+        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
+        rootFolder.
     '''
+    # Start at the rootFolder if container starting point not specified
+    if not container_ref:
+        container_ref = service_instance.content.rootFolder
+
     # Create an object view
     obj_view = service_instance.content.viewManager.CreateContainerView(
-        service_instance.content.rootFolder, [obj_type], True)
+        container_ref, [obj_type], True)

     # Create traversal spec to determine the path for collection
     traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(

@@ -304,7 +313,7 @@ def get_content(service_instance, obj_type, property_list=None):
     return content


-def get_mor_by_property(service_instance, object_type, property_value, property_name='name'):
+def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
     '''
     Returns the first managed object reference having the specified property value.

@@ -319,9 +328,14 @@ def get_mor_by_property(service_instance, object_type, property_value, property_

     property_name
         An object property used to return the specified object reference results. Defaults to ``name``.

+    container_ref
+        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
+        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
+        rootFolder.
     '''
     # Get list of all managed object references with specified property
-    object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name])
+    object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref)

     for obj in object_list:
         if obj[property_name] == property_value:

@@ -330,7 +344,7 @@ def get_mor_by_property(service_instance, object_type, property_value, property_
     return None


-def get_mors_with_properties(service_instance, object_type, property_list=None):
+def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None):
     '''
     Returns a list containing properties and managed object references for the managed object.

@@ -342,9 +356,14 @@ def get_mors_with_properties(service_instance, object_type, property_list=None):

     property_list
         An optional list of object properties used to return even more filtered managed object reference results.

+    container_ref
+        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
+        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
+        rootFolder.
     '''
     # Get all the content
-    content = get_content(service_instance, object_type, property_list=property_list)
+    content = get_content(service_instance, object_type, property_list=property_list, container_ref=container_ref)

     object_list = []
     for obj in content:
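`get_mor_by_property` is ultimately a linear scan over the property dicts returned by `get_mors_with_properties`, now scoped by `container_ref`. The filtering core, separated from pyVmomi (the data below is fabricated for the example):

.. code-block:: python

    def get_mor_by_property(object_list, property_value, property_name='name'):
        """Return the first managed object whose property matches, else None."""
        for obj in object_list:
            if obj.get(property_name) == property_value:
                return obj['object']
        return None


    # Stand-ins for {'name': ..., 'object': <ManagedObjectReference>} dicts.
    vms = [{'name': 'web01', 'object': 'vm-101'},
           {'name': 'db01', 'object': 'vm-102'}]
    print(get_mor_by_property(vms, 'db01'))     # vm-102
    print(get_mor_by_property(vms, 'missing'))  # None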
@@ -556,6 +556,7 @@ def dependency_information(include_salt_cloud=False):
         ('timelib', 'timelib', 'version'),
         ('dateutil', 'dateutil', '__version__'),
         ('pygit2', 'pygit2', '__version__'),
+        ('libgit2', 'pygit2', 'LIBGIT2_VERSION'),
         ('smmap', 'smmap', '__version__'),
         ('cffi', 'cffi', '__version__'),
         ('pycparser', 'pycparser', '__version__'),
@@ -52,7 +52,7 @@ class ArtifactoryTestCase(TestCase):
            </versioning>
        </metadata>
        ''')
-        metadata = artifactory._get_artifact_metadata(artifactory_url='http://artifactory.company.com/artifactory',
+        metadata = artifactory._get_artifact_metadata(artifactory_url='http://artifactory.example.com/artifactory',
                                                       repository='libs-releases',
                                                       group_id='com.company.sampleapp.web-module',
                                                       artifact_id='web',

@@ -86,7 +86,7 @@ class ArtifactoryTestCase(TestCase):
            </versioning>
        </metadata>
        ''')
-        metadata = artifactory._get_snapshot_version_metadata(artifactory_url='http://artifactory.company.com/artifactory',
+        metadata = artifactory._get_snapshot_version_metadata(artifactory_url='http://artifactory.example.com/artifactory',
                                                               repository='libs-releases',
                                                               group_id='com.company.sampleapp.web-module',
                                                               artifact_id='web',

@@ -95,38 +95,38 @@ class ArtifactoryTestCase(TestCase):
         self.assertEqual(metadata['snapshot_versions']['war'], '1.1_RC8-20140418.150212-1')

     def test_artifact_metadata_url(self):
-        metadata_url = artifactory._get_artifact_metadata_url(artifactory_url='http://artifactory.company.com/artifactory',
+        metadata_url = artifactory._get_artifact_metadata_url(artifactory_url='http://artifactory.example.com/artifactory',
                                                               repository='libs-releases',
                                                               group_id='com.company.sampleapp.web-module',
                                                               artifact_id='web')

-        self.assertEqual(metadata_url, "http://artifactory.company.com/artifactory/libs-releases/com/company/sampleapp/web-module/web/maven-metadata.xml")
+        self.assertEqual(metadata_url, "http://artifactory.example.com/artifactory/libs-releases/com/company/sampleapp/web-module/web/maven-metadata.xml")

     def test_snapshot_version_metadata_url(self):
-        metadata_url = artifactory._get_snapshot_version_metadata_url(artifactory_url='http://artifactory.company.com/artifactory',
+        metadata_url = artifactory._get_snapshot_version_metadata_url(artifactory_url='http://artifactory.example.com/artifactory',
                                                                       repository='libs-snapshots',
                                                                       group_id='com.company.sampleapp.web-module',
                                                                       artifact_id='web',
                                                                       version='1.0_RC10-SNAPSHOT')

-        self.assertEqual(metadata_url, "http://artifactory.company.com/artifactory/libs-snapshots/com/company/sampleapp/web-module/web/1.0_RC10-SNAPSHOT/maven-metadata.xml")
+        self.assertEqual(metadata_url, "http://artifactory.example.com/artifactory/libs-snapshots/com/company/sampleapp/web-module/web/1.0_RC10-SNAPSHOT/maven-metadata.xml")

     def test_construct_url_for_released_version(self):
         artifact_url, file_name = artifactory._get_release_url(repository='libs-releases',
                                                                group_id='com.company.sampleapp.web-module',
                                                                artifact_id='web',
                                                                packaging='war',
-                                                               artifactory_url='http://artifactory.company.com/artifactory',
+                                                               artifactory_url='http://artifactory.example.com/artifactory',
                                                                version='1.0_RC20')

-        self.assertEqual(artifact_url, "http://artifactory.company.com/artifactory/libs-releases/com/company/sampleapp/web-module/web/1.0_RC20/web-1.0_RC20.war")
+        self.assertEqual(artifact_url, "http://artifactory.example.com/artifactory/libs-releases/com/company/sampleapp/web-module/web/1.0_RC20/web-1.0_RC20.war")
         self.assertEqual(file_name, "web-1.0_RC20.war")

     def test_construct_url_for_snapshot_version(self):
         prev_artifactory_get_snapshot_version_metadata = artifactory._get_snapshot_version_metadata
         artifactory._get_snapshot_version_metadata = MagicMock(return_value={'snapshot_versions': {'war': '1.0_RC10-20131127.105838-2'}})

-        artifact_url, file_name = artifactory._get_snapshot_url(artifactory_url='http://artifactory.company.com/artifactory',
+        artifact_url, file_name = artifactory._get_snapshot_url(artifactory_url='http://artifactory.example.com/artifactory',
                                                                 repository='libs-snapshots',
                                                                 group_id='com.company.sampleapp.web-module',
                                                                 artifact_id='web',

@@ -134,5 +134,5 @@ class ArtifactoryTestCase(TestCase):
                                                                 packaging='war',
                                                                 headers={})

-        self.assertEqual(artifact_url, "http://artifactory.company.com/artifactory/libs-snapshots/com/company/sampleapp/web-module/web/1.0_RC10-SNAPSHOT/web-1.0_RC10-20131127.105838-2.war")
+        self.assertEqual(artifact_url, "http://artifactory.example.com/artifactory/libs-snapshots/com/company/sampleapp/web-module/web/1.0_RC10-SNAPSHOT/web-1.0_RC10-20131127.105838-2.war")
         self.assertEqual(file_name, "web-1.0_RC10-20131127.105838-2.war")
@@ -70,9 +70,10 @@ dhcp_options_parameters.update(conn_parameters)

 opts = salt.config.DEFAULT_MINION_OPTS
 utils = salt.loader.utils(opts, whitelist=['boto'])
+mods = salt.loader.minion_mods(opts)

 boto_vpc.__utils__ = utils
-boto_vpc.__init__(opts)
+boto_vpc.__init__(opts, pack=mods)


 def _has_required_boto():
@@ -79,7 +79,7 @@ class JBoss7CliTestCase(TestCase):
         'instance_name': 'Instance1',
         'cli_user': 'jbossadm',
         'cli_password': 'jbossadm',
-        'status_url': 'http://sampleapp.company.com:8080/'
+        'status_url': 'http://sampleapp.example.com:8080/'
     }

     def setUp(self):

@@ -301,7 +301,7 @@ class JBoss7CliTestCase(TestCase):
             "blocking-timeout-wait-millis" => undefined,
             "check-valid-connection-sql" => undefined,
             "connection-properties" => undefined,
-            "connection-url" => "jdbc:mysql:thin:@db.company.com",
+            "connection-url" => "jdbc:mysql:thin:@db.example.com",
             "datasource-class" => undefined,
             "driver-class" => undefined,
             "driver-name" => "mysql",
@@ -56,7 +56,7 @@ class SeedTestCase(TestCase):
         with patch.object(os.path, 'exists', return_value=True):
             with patch.object(os, 'chmod', return_value=None):
                 with patch.object(shutil, 'copy', return_value=None):
-                    self.assertEqual(seed.prep_bootstrap('mpt'), 'A')
+                    self.assertEqual(seed.prep_bootstrap('mpt'), ('A', 'A'))

     def test_apply_(self):
         '''
@@ -133,10 +133,9 @@ class MockState(object):
         flag = False
         opts = {'state_top': ""}

-        def __init__(self, opts, pillar=None, jid=None, pillar_enc=None, mocked=None):
+        def __init__(self, opts, pillar=None, *args, **kwargs):
             self.state = MockState.State(opts,
-                                         pillar=pillar,
-                                         pillar_enc=pillar_enc)
+                                         pillar=pillar)

         def render_state(self, sls, saltenv, mods, matches, local=False):
             '''
@@ -36,7 +36,7 @@ class ArtifactoryTestCase(TestCase):
         given location.
         '''
         name = 'jboss'
-        arti_url = 'http://artifactory.intranet.company.com/artifactory'
+        arti_url = 'http://artifactory.intranet.example.com/artifactory'
         artifact = {'artifactory_url': arti_url, 'artifact_id': 'module',
                     'repository': 'libs-release-local', 'packaging': 'jar',
                     'group_id': 'com.company.module', 'classifier': 'sources',
@@ -14,6 +14,7 @@ from salttesting.mock import (
     MagicMock,
     mock_open,
     patch)
+
 ensure_in_syspath('../../')

 # Import third party libs
@@ -89,13 +90,6 @@ class TestFileState(TestCase):
         ret = filestate.managed('/tmp/foo', contents='hi', contents_pillar='foo:bar')
         self.assertEqual(False, ret['result'])

-    def test_contents_pillar_adds_newline(self):
-        # make sure the newline
-        pillar_value = 'i am the pillar value'
-        expected = '{0}\n'.format(pillar_value)
-
-        self.run_contents_pillar(pillar_value, expected)
-
     def test_contents_pillar_doesnt_add_more_newlines(self):
         # make sure the newline
         pillar_value = 'i am the pillar value\n'

@@ -123,9 +117,6 @@ class TestFileState(TestCase):

         ret = filestate.managed(path, contents_pillar=pillar_path)

-        # make sure the pillar_mock is called with the given path
-        pillar_mock.assert_called_once_with(pillar_path)
-
         # make sure no errors are returned
         self.assertEqual(None, ret)
@@ -609,8 +600,8 @@ class FileTestCase(TestCase):
                                          group=group,
                                          defaults=True), ret)

-        comt = ('Only one of contents, contents_pillar, '
-                'and contents_grains is permitted')
+        comt = ('Only one of \'contents\', \'contents_pillar\', '
+                'and \'contents_grains\' is permitted')
         ret.update({'comment': comt})
         self.assertDictEqual(filestate.managed
                              (name, user=user, group=group,
@@ -6,7 +6,7 @@ from distutils.version import LooseVersion  # pylint: disable=import-error,no-name-in-module

 # Import Salt Testing libs
 from salttesting.unit import skipIf, TestCase
-from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
+from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
 from salttesting.helpers import ensure_in_syspath
 ensure_in_syspath('../../')

@@ -111,6 +111,7 @@ class BotoUtilsTestCaseBase(TestCase):
         salt.utils.boto.__context__ = {}
         salt.utils.boto.__opts__ = {}
         salt.utils.boto.__pillar__ = {}
+        salt.utils.boto.__salt__ = {'config.option': MagicMock(return_value='dummy_opt')}


 class BotoUtilsCacheIdTestCase(BotoUtilsTestCaseBase):