Mirror of https://github.com/saltstack/salt.git

Merge branch 'develop' of git://github.com/saltstack/salt into develop
Commit 6a3a21ba14
148 changed files with 13485 additions and 1269 deletions
2
.gitignore
vendored
2
.gitignore
vendored
|
@ -1,6 +1,8 @@
|
|||
build
|
||||
*.py[co]
|
||||
pkg/arch/*.tar.xz
|
||||
*.swp
|
||||
doc/_build
|
||||
|
||||
# virtualenv
|
||||
# - ignores directories of a virtualenv when you create it right on
|
||||
|
|
2
AUTHORS
2
AUTHORS
|
@ -27,4 +27,6 @@ Joseph P. Hall
|
|||
|
||||
Erik Nolte
|
||||
|
||||
Jeff Schroeder aka SEJeff
|
||||
|
||||
Matthias Teege
|
||||
|
|
16
conf/master
16
conf/master
|
@ -33,12 +33,12 @@
|
|||
##########################################
|
||||
# Enable "open mode", this mode still maintains encryption, but turns off
|
||||
# authentication, this is only intended for highly secure environments or for
|
||||
# the situation where your keys end up in a bad state. If you run in open more
|
||||
# the situation where your keys end up in a bad state. If you run in open mode
|
||||
# you do so at your own risk!
|
||||
#open_mode: False
|
||||
|
||||
# Enable auto_accept, this setting will automatically accept all incoming
|
||||
# public keys from the minions
|
||||
# public keys from the minions. Note that this is insecure.
|
||||
#auto_accept: False
|
||||
|
||||
##### State System settings #####
|
||||
|
@ -168,3 +168,15 @@
|
|||
# }
|
||||
#
|
||||
#log_granular_levels: {}
|
||||
|
||||
|
||||
##### Node Groups #####
|
||||
##########################################
|
||||
# Node groups allow for logical groupings of minion nodes.
|
||||
# A group consists of a group name and a compound target.
|
||||
#
|
||||
# nodegroups: {
|
||||
# group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com',
|
||||
# group2: 'G@os:Debian and foo.domain.com',
|
||||
# }
|
||||
|
||||
|
|
15
conf/minion
15
conf/minion
|
@ -59,8 +59,11 @@
|
|||
#
|
||||
#renderer: yaml_jinja
|
||||
#
|
||||
# Test allows for the state runs to only be test runs
|
||||
#test: False
|
||||
# state_verbose allows for the data returned from the minion to be more
|
||||
# verbose. Normally only states that fail or states that have changes are
|
||||
# returned, but setting state_verbose to True will return all states that
|
||||
# were checked
|
||||
#state_verbose: False
|
||||
|
||||
###### Security settings #####
|
||||
###########################################
|
||||
|
@ -97,6 +100,14 @@
|
|||
#log_granular_levels: {}
|
||||
|
||||
|
||||
###### Miscellaneous configuration #####
|
||||
###########################################
|
||||
# When waiting for a master to accept the minion's public key, salt will
|
||||
# continuously attempt to reconnect until successful. This is the time, in
|
||||
# seconds, between those reconnection attempts.
|
||||
# acceptance_wait_time = 10
|
||||
|
||||
|
||||
###### Module configuration #####
|
||||
###########################################
|
||||
# Salt allows for modules to be passed arbitrary configuration data, any data
|
||||
|
|
30
debian/AUTHORS
vendored
30
debian/AUTHORS
vendored
|
@ -1,30 +0,0 @@
|
|||
=============
|
||||
Salt Authors
|
||||
=============
|
||||
|
||||
Whos Who in Salt
|
||||
============================
|
||||
|
||||
The Man With the Plan
|
||||
----------------------------
|
||||
|
||||
Thomas S Hatch is the main developer of Salt. He is the founder, owner,
|
||||
maintainer and lead of the Salt project, as well as author of the majority
|
||||
of the Salt code and documentation.
|
||||
|
||||
Documentation System
|
||||
----------------------------
|
||||
|
||||
The documentation system was put together by Seth House, much of the
|
||||
documentation is being maintained by Seth
|
||||
|
||||
Developers
|
||||
----------------------------
|
||||
|
||||
Pedro Algarvio, aka, s0undt3ch
|
||||
|
||||
Joseph P. Hall
|
||||
|
||||
Erik Nolte
|
||||
|
||||
Matthias Teege
|
36
debian/changelog
vendored
36
debian/changelog
vendored
|
@ -1,36 +1,6 @@
|
|||
salt (0.9.4-1) lucid; urgency=low
|
||||
salt (0.9.5+git20111227.g8182e48-1) unstable; urgency=low
|
||||
|
||||
* Build PPA for 0.9.4
|
||||
* First package release. (Closes: #643789)
|
||||
|
||||
-- Seth House <seth@eseth.com> Sun, 27 Nov 2011 16:11:36 -0700
|
||||
-- Corey Quinn <corey@sequestered.net> Mon, 26 Dec 2011 13:55:22 -0800
|
||||
|
||||
salt (0.9.4.pre-d353743-1) lucid; urgency=low
|
||||
|
||||
* Build PPA for Ubuntu (fixed?)
|
||||
|
||||
-- Seth House <seth@eseth.com> Wed, 25 Nov 2011 23:19:00 -0600
|
||||
|
||||
salt (0.9.4.pre-6dd76f2-1) lucid; urgency=low
|
||||
|
||||
* Build PPA for Ubuntu (fixed?)
|
||||
|
||||
-- Seth House <seth@eseth.com> Wed, 25 Nov 2011 11:19:00 -0600
|
||||
|
||||
salt (0.9.4.pre-1) lucid; urgency=low
|
||||
|
||||
* Build PPA for Ubuntu
|
||||
|
||||
-- Seth House <seth@eseth.com> Wed, 25 Nov 2011 11:19:00 -0600
|
||||
|
||||
salt (0.9.2-2) unstable; urgency=low
|
||||
|
||||
* Fixed many lintian(1) warnings and errors
|
||||
|
||||
-- Aaron Toponce <aaron.toponce@gmail.com> Mon, 03 Oct 2011 21:34:25 -0600
|
||||
|
||||
salt (0.9.2-1) unstable; urgency=low
|
||||
|
||||
* Initial release. (Closes: #643789)
|
||||
* First Debian package
|
||||
|
||||
-- Aaron Toponce <aaron.toponce@gmail.com> Wed, 28 Sep 2011 01:21:48 -0600
|
||||
|
|
111
debian/control
vendored
111
debian/control
vendored
|
@ -1,24 +1,34 @@
|
|||
Source: salt
|
||||
Section: admin
|
||||
Priority: optional
|
||||
Maintainer: Aaron Toponce <aaron.toponce@gmail.com>
|
||||
Build-Depends: debhelper (>= 7), python-support, cython, python-yaml,
|
||||
python-setuptools, python-yaml, python-crypto, python-m2crypto,
|
||||
python-pyzmq (>= 2.1.9), libzmq1 (>= 2.1.9), libzmq-dev (>= 2.1.9),
|
||||
python (>= 2.6), python-jinja2
|
||||
Standards-Version: 3.8.3
|
||||
Maintainer: Corey Quinn <corey@sequestered.net>
|
||||
Build-Depends: debhelper (>= 7.0.50~),
|
||||
python-support,
|
||||
cython,
|
||||
python-yaml,
|
||||
python-setuptools,
|
||||
python-yaml,
|
||||
python-crypto,
|
||||
python-m2crypto,
|
||||
python-zmq (>= 2.1.9),
|
||||
libzmq1 (>= 2.1.9),
|
||||
libzmq-dev (>= 2.1.9),
|
||||
python (>= 2.6),
|
||||
python-dev (>= 2.6),
|
||||
python-jinja2
|
||||
Standards-Version: 3.9.2
|
||||
Homepage: http://saltstack.org
|
||||
#Vcs-Git: git://git.debian.org/collab-maint/salt.git
|
||||
#Vcs-Browser: http://git.debian.org/?p=collab-maint/salt.git;a=summary
|
||||
|
||||
Package: salt
|
||||
Architecture: all
|
||||
Depends: ${python:Depends}, ${misc:Depends}, python-setuptools,
|
||||
python-yaml, python-crypto, python-m2crypto, python-pyzmq (>= 2.1.9),
|
||||
libzmq1 (>= 2.1.9), libzmq-dev (>= 2.1.9), python (>= 2.6), python-jinja2
|
||||
Description: This package provides a remote manager to administer servers.
|
||||
This package is a powerful remote execution manager that can be used to
|
||||
administer servers in a fast and efficient way.
|
||||
|
||||
Package: salt-common
|
||||
Architecture: any
|
||||
Depends: ${python:Depends},
|
||||
${misc:Depends}
|
||||
Description: Shared libraries that salt requires for all packages
|
||||
This package is a powerful remote execution manager that can be used
|
||||
to administer servers in a fast and efficient way.
|
||||
.
|
||||
It allows commands to be executed across large groups of servers. This
|
||||
means systems can be easily managed, but data can also be easily gathered.
|
||||
|
@ -30,3 +40,76 @@ Description: This package provides a remote manager to administer servers.
|
|||
.
|
||||
Between the remote execution system, and state management Salt addresses
|
||||
the backbone of cloud and data center management.
|
||||
.
|
||||
This particular package provides shared libraries that salt-master, salt-minion,
|
||||
and salt-syndic require to function.
|
||||
|
||||
|
||||
Package: salt-master
|
||||
Architecture: all
|
||||
Depends: ${python:Depends},
|
||||
${misc:Depends},
|
||||
salt-common
|
||||
Description: This package provides a remote manager to administer servers via salt
|
||||
This package is a powerful remote execution manager that can be used
|
||||
to administer servers in a fast and efficient way.
|
||||
.
|
||||
It allows commands to be executed across large groups of servers. This
|
||||
means systems can be easily managed, but data can also be easily gathered.
|
||||
Quick introspection into running systems becomes a reality.
|
||||
.
|
||||
Remote execution is usually used to set up a certain state on a remote
|
||||
system. Salt addresses this problem as well, the salt state system uses
|
||||
salt state files to define the state a server needs to be in.
|
||||
.
|
||||
Between the remote execution system, and state management Salt addresses
|
||||
the backbone of cloud and data center management.
|
||||
.
|
||||
This particular package provides the salt controller.
|
||||
|
||||
|
||||
Package: salt-minion
|
||||
Architecture: all
|
||||
Depends: ${python:Depends},
|
||||
${misc:Depends},
|
||||
salt-common
|
||||
Description: This package represents the client package for salt
|
||||
This package is a powerful remote execution manager that can be used
|
||||
to administer servers in a fast and efficient way.
|
||||
.
|
||||
It allows commands to be executed across large groups of servers. This
|
||||
means systems can be easily managed, but data can also be easily gathered.
|
||||
Quick introspection into running systems becomes a reality.
|
||||
.
|
||||
Remote execution is usually used to set up a certain state on a remote
|
||||
system. Salt addresses this problem as well, the salt state system uses
|
||||
salt state files to define the state a server needs to be in.
|
||||
.
|
||||
Between the remote execution system, and state management Salt addresses
|
||||
the backbone of cloud and data center management.
|
||||
.
|
||||
This particular package provides the worker / agent for salt.
|
||||
|
||||
|
||||
Package: salt-syndic
|
||||
Architecture: all
|
||||
Depends: ${python:Depends},
|
||||
${misc:Depends},
|
||||
salt-master
|
||||
Description: salt-syndic represents the master-of-masters for salt
|
||||
This package is a powerful remote execution manager that can be used
|
||||
to administer servers in a fast and efficient way.
|
||||
.
|
||||
It allows commands to be executed across large groups of servers. This
|
||||
means systems can be easily managed, but data can also be easily gathered.
|
||||
Quick introspection into running systems becomes a reality.
|
||||
.
|
||||
Remote execution is usually used to set up a certain state on a remote
|
||||
system. Salt addresses this problem as well, the salt state system uses
|
||||
salt state files to define the state a server needs to be in.
|
||||
.
|
||||
Between the remote execution system, and state management Salt addresses
|
||||
the backbone of cloud and data center management.
|
||||
.
|
||||
This particular package provides the master of masters for salt-- it enables the management
|
||||
of multiple masters at a time.
|
||||
|
|
1
debian/files
vendored
1
debian/files
vendored
|
@ -1 +0,0 @@
|
|||
salt_0.9.4-1_all.deb admin optional
|
8
debian/links
vendored
Normal file
8
debian/links
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
usr/share/salt/salt /usr/bin/salt
|
||||
usr/share/salt/salt-master /usr/bin/salt-master
|
||||
usr/share/salt/salt-syndic /usr/bin/salt-syndic
|
||||
usr/share/salt/salt-cp /usr/bin/salt-cp
|
||||
usr/share/salt/salt-key /usr/bin/salt-key
|
||||
usr/share/salt/salt-run /usr/bin/salt-run
|
||||
usr/share/salt/salt-minion /usr/bin/salt-minion
|
||||
usr/share/salt/salt-call /usr/bin/salt-call
|
13
debian/patches/no-license
vendored
13
debian/patches/no-license
vendored
|
@ -1,13 +0,0 @@
|
|||
Index: salt-0.9.4/setup.py
|
||||
===================================================================
|
||||
--- salt-0.9.4.orig/setup.py 2011-10-03 21:07:32.524520895 -0600
|
||||
+++ salt-0.9.4/setup.py 2011-10-03 21:14:33.852854281 -0600
|
||||
@@ -92,7 +92,7 @@
|
||||
['salt/modules/cytest.pyx',
|
||||
]),
|
||||
(doc_path,
|
||||
- ['LICENSE'
|
||||
+ [
|
||||
]),
|
||||
(template_path,
|
||||
['doc/example/templates/yaml-jinja.yml',
|
1
debian/patches/series
vendored
1
debian/patches/series
vendored
|
@ -1 +0,0 @@
|
|||
no-license
|
25
debian/rules
vendored
25
debian/rules
vendored
|
@ -1,18 +1,15 @@
|
|||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
# Sample debian/rules that uses debhelper.
|
||||
# This file was originally written by Joey Hess and Craig Small.
|
||||
# As a special exception, when this file is copied by dh-make into a
|
||||
# dh-make output file, you may use that output file without restriction.
|
||||
# This special exception was added by Craig Small in version 0.37 of dh-make.
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
%:
|
||||
dh $@
|
||||
dh $@
|
||||
|
||||
override_dh_installinit:
|
||||
dh_installinit --no-start --name="salt-master"
|
||||
dh_installinit --no-start --name="salt-minion"
|
||||
dh_installinit --no-start --name="salt-syndic"
|
||||
#override_dh_installinit:
|
||||
# dh_installinit --no-start --name="salt-master"
|
||||
# dh_installinit --no-start --name="salt-minion"
|
||||
# dh_installinit --no-start --name="salt-syndic"
|
||||
|
||||
get-orig-source:
|
||||
git clone https://github.com/saltstack/salt.git
|
||||
mv salt salt-0.9.5
|
||||
tar -zcvf salt_0.9.5.orig.tar.gz --exclude "debian*" --exclude-vcs salt-0.9.5
|
||||
rm -rf salt-0.9.5
|
||||
|
|
9
debian/salt-common.install
vendored
Normal file
9
debian/salt-common.install
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
usr/share/man/man1/salt-minion.1
|
||||
usr/share/man/man1/salt-call.1
|
||||
usr/share/man/man1/salt-key.1
|
||||
usr/share/man/man1/salt-master.1
|
||||
usr/share/man/man1/salt-syndic.1
|
||||
usr/share/man/man1/salt-cp.1
|
||||
usr/share/man/man1/salt.1
|
||||
conf/minion /etc/salt/minion
|
||||
salt/* /usr/share/salt/
|
|
@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -46,11 +46,11 @@ PROC_LIST=""
|
|||
RETVAL=0
|
||||
|
||||
findproc() {
|
||||
PROC_LIST=`$PS_CMD | grep $PROCESS | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
|
||||
PROC_LIST=`$PS_CMD | grep 'bin/python.*salt-master' | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
|
||||
}
|
||||
|
||||
start() {
|
||||
echo -n $"Starting salt-master daemon: "
|
||||
echo -n "Starting salt-master daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-master -d $CONFIG_ARGS
|
||||
rc_status -v
|
||||
|
@ -72,7 +72,7 @@ start() {
|
|||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping salt-master daemon: "
|
||||
echo -n "Stopping salt-master daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
killproc -TERM /usr/bin/salt-master
|
||||
rc_status -v
|
||||
|
@ -131,7 +131,7 @@ case "$1" in
|
|||
RETVAL=$?
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
1
debian/salt-master.install
vendored
Normal file
1
debian/salt-master.install
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
conf/master /etc/salt/master
|
6
debian/salt-master.manpages
vendored
Normal file
6
debian/salt-master.manpages
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
doc/man/salt.7
|
||||
doc/man/salt.1
|
||||
doc/man/salt-master.1
|
||||
doc/man/salt-key.1
|
||||
doc/man/salt-cp.1
|
||||
doc/man/salt-run.1
|
|
@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -46,18 +46,18 @@ PROC_LIST=""
|
|||
RETVAL=0
|
||||
|
||||
findproc() {
|
||||
PROC_LIST=`$PS_CMD | grep $PROCESS | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
|
||||
PROC_LIST=`$PS_CMD | grep 'bin/python.*salt-minion' | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
|
||||
}
|
||||
|
||||
start() {
|
||||
echo -n $"Starting salt-minion daemon: "
|
||||
echo -n "Starting salt-minion daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-minion -d $CONFIG_ARGS
|
||||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
findproc
|
||||
if [ -n "$PROC_LIST" ]; then
|
||||
echo -n "already started, lock file found"
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif /usr/bin/python /usr/bin/salt-minion -d; then
|
||||
echo -n "OK"
|
||||
|
@ -72,7 +72,7 @@ start() {
|
|||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping salt-minion daemon: "
|
||||
echo -n "Stopping salt-minion daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
killproc -TERM /usr/bin/salt-minion
|
||||
rc_status -v
|
||||
|
@ -131,7 +131,7 @@ case "$1" in
|
|||
RETVAL=$?
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
4
debian/salt-minion.install
vendored
Normal file
4
debian/salt-minion.install
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
scripts/salt-minion /usr/share/salt/salt-minion
|
||||
scripts/salt-call /usr/share/salt/salt-call
|
||||
modules/* /usr/share/salt/modules/
|
||||
conf/minion /etc/salt/minion
|
2
debian/salt-minion.manpages
vendored
Normal file
2
debian/salt-minion.manpages
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
doc/man/salt-call.1
|
||||
doc/man/salt-minion.1
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
# chkconfig header
|
||||
|
||||
# chkconfig: 2345 99 01
|
||||
# chkconfig: 2345 99 01
|
||||
# description: This is a daemon that controls the salt minions
|
||||
#
|
||||
# processname: /usr/bin/salt-syndic
|
||||
|
@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -46,18 +46,18 @@ PROC_LIST=""
|
|||
RETVAL=0
|
||||
|
||||
findproc() {
|
||||
PROC_LIST=`$PS_CMD | grep $PROCESS | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
|
||||
PROC_LIST=`$PS_CMD | grep 'bin/python.*salt-syndic' | grep -v grep | grep -v sh | grep -v vi | awk '{ print $1 }'`
|
||||
}
|
||||
|
||||
start() {
|
||||
echo -n $"Starting salt-syndic daemon: "
|
||||
echo -n "Starting salt-syndic daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-syndic -d $CONFIG_ARGS
|
||||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
findproc
|
||||
if [ -n "$PROC_LIST" ]; then
|
||||
echo -n "already started, lock file found"
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif /usr/bin/python /usr/bin/salt-syndic -d; then
|
||||
echo -n "OK"
|
||||
|
@ -72,7 +72,7 @@ start() {
|
|||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping salt-syndic daemon: "
|
||||
echo -n "Stopping salt-syndic daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
killproc -TERM /usr/bin/salt-syndic
|
||||
rc_status -v
|
||||
|
@ -127,7 +127,7 @@ case "$1" in
|
|||
RETVAL=$?
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|reload|force-reload}"
|
||||
echo "Usage: $0 {start|stop|status|restart|reload|force-reload}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
0
debian/salt-syndic.install
vendored
Normal file
0
debian/salt-syndic.install
vendored
Normal file
1
debian/salt-syndic.manpages
vendored
Normal file
1
debian/salt-syndic.manpages
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
doc/man/salt-syndic.1
|
48
debian/salt.debhelper.log
vendored
48
debian/salt.debhelper.log
vendored
|
@ -1,48 +0,0 @@
|
|||
dh_auto_configure
|
||||
dh_auto_build
|
||||
dh_auto_test
|
||||
dh_prep
|
||||
dh_installdirs
|
||||
dh_auto_install
|
||||
dh_install
|
||||
dh_installdocs
|
||||
dh_installchangelogs
|
||||
dh_installexamples
|
||||
dh_installman
|
||||
dh_installcatalogs
|
||||
dh_installcron
|
||||
dh_installdebconf
|
||||
dh_installemacsen
|
||||
dh_installifupdown
|
||||
dh_installinfo
|
||||
dh_pysupport
|
||||
override_dh_installinit dh_installinit
|
||||
override_dh_installinit dh_installinit
|
||||
override_dh_installinit dh_installinit
|
||||
dh_installinit
|
||||
dh_installmenu
|
||||
dh_installmime
|
||||
dh_installmodules
|
||||
dh_installlogcheck
|
||||
dh_installlogrotate
|
||||
dh_installpam
|
||||
dh_installppp
|
||||
dh_installudev
|
||||
dh_installwm
|
||||
dh_installxfonts
|
||||
dh_installgsettings
|
||||
dh_bugfiles
|
||||
dh_ucf
|
||||
dh_lintian
|
||||
dh_gconf
|
||||
dh_icons
|
||||
dh_perl
|
||||
dh_usrlocal
|
||||
dh_link
|
||||
dh_compress
|
||||
dh_fixperms
|
||||
dh_installdeb
|
||||
dh_gencontrol
|
||||
dh_md5sums
|
||||
dh_builddeb
|
||||
dh_builddeb
|
20
debian/salt.postinst.debhelper
vendored
20
debian/salt.postinst.debhelper
vendored
|
@ -1,20 +0,0 @@
|
|||
# Automatically added by dh_pysupport
|
||||
if which update-python-modules >/dev/null 2>&1; then
|
||||
update-python-modules salt.public
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ -x "/etc/init.d/salt-master" ]; then
|
||||
update-rc.d salt-master defaults >/dev/null || exit $?
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ -x "/etc/init.d/salt-minion" ]; then
|
||||
update-rc.d salt-minion defaults >/dev/null || exit $?
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ -x "/etc/init.d/salt-syndic" ]; then
|
||||
update-rc.d salt-syndic defaults >/dev/null || exit $?
|
||||
fi
|
||||
# End automatically added section
|
15
debian/salt.postrm.debhelper
vendored
15
debian/salt.postrm.debhelper
vendored
|
@ -1,15 +0,0 @@
|
|||
# Automatically added by dh_installinit
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d salt-syndic remove >/dev/null
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d salt-minion remove >/dev/null
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d salt-master remove >/dev/null
|
||||
fi
|
||||
# End automatically added section
|
5
debian/salt.prerm.debhelper
vendored
5
debian/salt.prerm.debhelper
vendored
|
@ -1,5 +0,0 @@
|
|||
# Automatically added by dh_pysupport
|
||||
if which update-python-modules >/dev/null 2>&1; then
|
||||
update-python-modules -c salt.public
|
||||
fi
|
||||
# End automatically added section
|
3
debian/salt.substvars
vendored
3
debian/salt.substvars
vendored
|
@ -1,3 +0,0 @@
|
|||
python:Versions=2.6, 2.7
|
||||
python:Depends=python, python-support (>= 0.90.0)
|
||||
misc:Depends=
|
10
doc/_templates/indexsidebar.html
vendored
10
doc/_templates/indexsidebar.html
vendored
|
@ -13,8 +13,10 @@
|
|||
#}
|
||||
{% endif %}
|
||||
|
||||
<p>Notice something different? We moved the Salt repository to the new saltstack GitHub organization.
|
||||
<a href="http://red45.wordpress.com/2011/11/15/little-move-big-progress/">Read why.</a>
|
||||
<h3>Recent updates</h3>
|
||||
<p>Our IRC channel is now on the popular Freenode network. See you there!</p>
|
||||
<p>The Salt git repository can now be found at the new saltstack GitHub organization.
|
||||
<a href="http://red45.wordpress.com/2011/11/15/little-move-big-progress/">Read why.</a></p>
|
||||
|
||||
<h3>Get help. Get involved.</h3>
|
||||
|
||||
|
@ -25,6 +27,6 @@
|
|||
<button type="submit" name="sub">Subscribe</button></p>
|
||||
</form>
|
||||
|
||||
<p>Join us via IRC in the <tt>#salt</tt> channel on
|
||||
<a href="http://www.oftc.net/oftc/">OFTC</a>.</p>
|
||||
<p>Join us via IRC in the <tt>#salt</tt> channel via
|
||||
<a href="http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83">Freenode's webchat</a>.</p>
|
||||
<p><a href="{{ github_issues }}">Search bugs</a> or file one.</p>
|
||||
|
|
|
@ -37,7 +37,7 @@ salt \(aq*\(aq [ options ] sys.doc
|
|||
.sp
|
||||
salt \-E \(aq.*\(aq [ options ] sys.doc cmd
|
||||
.sp
|
||||
salt \-F \(aqoperatingsystem:Arch.*\(aq [ options ] test.ping
|
||||
salt \-G \(aqos:Arch.*\(aq [ options ] test.ping
|
||||
.sp
|
||||
salt \-Q test.ping
|
||||
.UNINDENT
|
||||
|
|
|
@ -1912,7 +1912,7 @@ A module to wrap archive calls
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.archive.gunzip(gzipfile)
|
||||
Uses the gzip command to create gzip files
|
||||
Uses the gunzip command to unpack gzip files
|
||||
.sp
|
||||
CLI Example to create \fB/tmp/sourcefile.txt\fP:
|
||||
.sp
|
||||
|
@ -2123,7 +2123,7 @@ CLI Example:
|
|||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
salt \(aq*\(aq cat
|
||||
salt \(aq*\(aq cmd.has_exec cat
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
|
@ -2175,7 +2175,7 @@ CLI Example:
|
|||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
salt \(aq*\(aq cmd.run "ls \-l | grep foo | awk \(aq{print $2}\(aq"
|
||||
salt \(aq*\(aq cmd.run_stderr "ls \-l | grep foo | awk \(aq{print $2}\(aq"
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
|
@ -2188,7 +2188,7 @@ CLI Example:
|
|||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
salt \(aq*\(aq cmd.run "ls \-l | grep foo | awk \(aq{print $2}\(aq"
|
||||
salt \(aq*\(aq cmd.run_stdout "ls \-l | grep foo | awk \(aq{print $2}\(aq"
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
|
@ -2615,7 +2615,7 @@ salt \(aq*\(aq /var/log name=\e*.[0\-9] mtime=+30d size=+10m delete
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.file.get_gid(path)
|
||||
Return the user that owns a given file
|
||||
Return the id of the group that owns a given file
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
|
@ -2628,7 +2628,7 @@ salt \(aq*\(aq file.get_gid /etc/passwd
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.file.get_group(path)
|
||||
Return the user that owns a given file
|
||||
Return the group that owns a given file
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
|
@ -2668,7 +2668,7 @@ salt \(aq*\(aq /etc/passwd sha512
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.file.get_uid(path)
|
||||
Return the user that owns a given file
|
||||
Return the id of the user that owns a given file
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
|
@ -2720,7 +2720,7 @@ salt \(aq*\(aq file.group_to_gid root
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.file.set_mode(path, mode)
|
||||
Set the more of a file
|
||||
Set the mode of a file
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
|
@ -2746,7 +2746,7 @@ salt \(aq*\(aq file.uid_to_user 0
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.file.user_to_uid(user)
|
||||
Convert user name to a gid
|
||||
Convert user name to a uid
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
|
@ -2940,13 +2940,13 @@ salt \(aq*\(aq group.add foo 3456
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.groupadd.chgid(name, gid)
|
||||
Change the default shell of the user
|
||||
Change the gid for a named group
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
salt \(aq*\(aq user.chshell foo /bin/zsh
|
||||
salt \(aq*\(aq group.chgid foo 4376
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
|
@ -3649,13 +3649,13 @@ salt \(aq*\(aq group.add foo 3456
|
|||
.INDENT 0.0
|
||||
.TP
|
||||
.B salt.modules.pw_group.chgid(name, gid)
|
||||
Change the default shell of the user
|
||||
Change the gid for a named group
|
||||
.sp
|
||||
CLI Example:
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
salt \(aq*\(aq user.chshell foo /bin/zsh
|
||||
salt \(aq*\(aq group.chgid foo 4376
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
|
@ -8369,7 +8369,7 @@ salt \(aq*\(aq [ options ] sys.doc
|
|||
.sp
|
||||
salt \-E \(aq.*\(aq [ options ] sys.doc cmd
|
||||
.sp
|
||||
salt \-F \(aqoperatingsystem:Arch.*\(aq [ options ] test.ping
|
||||
salt \-G \(aqos:Arch.*\(aq [ options ] test.ping
|
||||
.sp
|
||||
salt \-Q test.ping
|
||||
.UNINDENT
|
||||
|
|
|
@ -91,6 +91,27 @@ on the return of the primary function the main function is executed.
|
|||
Execution matching allows for matching minions based on any arbitrary running
|
||||
data on the minions.
|
||||
|
||||
Compound Targeting
|
||||
``````````````````
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
|
||||
Multiple target interfaces can be used in conjunction to determine the command
|
||||
targets. These targets can then be combined using ``and`` or ``or`` statements. This
|
||||
is best illustrated with an example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt -C 'G@os:Debian and webser* or E@db.*' test.ping
|
||||
|
||||
In this example, any minion whose ID starts with webser and is running Debian,
|
||||
or any minion whose ID starts with db, will be matched.
|
||||
|
||||
The type of matcher defaults to glob, but can be specified with the
|
||||
corresponding letter followed by the @ symbol. In the above example a grain is
|
||||
used with G@ as well as a regular expression with E@. The webser* target does
|
||||
not need to be prefaced with a target type specifier because it is a glob.
|
||||
|
||||
Calling the Function
|
||||
--------------------
|
||||
|
||||
|
|
|
@ -9,7 +9,9 @@ Synopsis
|
|||
|
||||
salt -E '.*' [ options ] sys.doc cmd
|
||||
|
||||
salt -F 'operatingsystem:Arch.*' [ options ] test.ping
|
||||
salt -G 'os:Arch.*' [ options ] test.ping
|
||||
|
||||
salt -C 'G@os:Arch.* and webserv* or G@kernel:FreeBSD' [ options ] test.ping
|
||||
|
||||
salt -Q test.ping
|
||||
|
||||
|
@ -49,6 +51,14 @@ Options
|
|||
the minions. The target expression is in the format of '<grain value>:<pcre
|
||||
regular expression>'; example: 'os:Arch.*'
|
||||
|
||||
.. option:: -C, --compound
|
||||
|
||||
Utilize many target definitions to make the call very granular. This option
|
||||
takes a group of targets separated by ``and`` or ``or``. The default matcher is a
|
||||
glob as usual, if something other than a glob is used preface it with the
|
||||
letter denoting the type, for example: 'webserv* and G@os:Debian or E@db.*'.
|
||||
Make sure that the compound target is encapsulated in quotes.
|
||||
|
||||
.. option:: -Q, --query
|
||||
|
||||
Execute a salt command query; this can be used to find the results of a
|
||||
|
|
|
@ -7,7 +7,7 @@ Salt runners are convenience applications executed with the salt-run command.
|
|||
A Salt runner can be a simple client call, or a complex application.
|
||||
|
||||
The use for a salt runner is to build a frontend hook for running sets of
|
||||
commands via salt of creating special formatted output.
|
||||
commands via salt or creating special formatted output.
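As a rough illustration (not taken from this commit) of what "a simple client call" can look like, the sketch below shows a hypothetical runner module; the module path, the function name, and the use of ``test.version`` are assumptions, while ``salt.client.LocalClient`` is the usual way a runner drives minions.

.. code-block:: python

    # Hypothetical runner module, e.g. salt/runners/report.py.
    # Every public function in a runner module becomes a
    # ``salt-run <module>.<function>`` command.
    import salt.client

    def versions():
        '''
        Ask every minion for its Salt version and print a short report.
        '''
        client = salt.client.LocalClient()
        # cmd() returns a dict mapping minion id -> return value
        ret = client.cmd('*', 'test.version')
        for minion in sorted(ret):
            print('{0}: {1}'.format(minion, ret[minion]))
        return ret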
|
||||
|
||||
Writing Salt Runners
|
||||
--------------------
|
||||
|
|
51
doc/ref/states/writing.rst
Normal file
51
doc/ref/states/writing.rst
Normal file
|
@ -0,0 +1,51 @@
|
|||
=============
|
||||
State Modules
|
||||
=============
|
||||
|
||||
State Modules are the components that map to actual enforcement and management
|
||||
of salt states.
|
||||
|
||||
States are - Easy to Write!
|
||||
============================
|
||||
|
||||
State Modules should be easy to write and straightforward. The information
|
||||
passed to the SLS data structures will map directly to the state modules.
|
||||
|
||||
Mapping the information from the SLS data is simple; this example should
|
||||
illustrate:
|
||||
|
||||
SLS file
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/salt/master: # maps to "name"
|
||||
file: # maps to State module filename eg https://github.com/saltstack/salt/blob/develop/salt/states/file.py
|
||||
- managed # maps to the managed function in the file State module
|
||||
- user: root # one of many options passed to the managed function
|
||||
- group: root
|
||||
- mode: 644
|
||||
- source: salt://salt/master
|
||||
|
||||
Therefore this SLS data can be directly linked to a module, function and
|
||||
arguments passed to that function.
|
||||
|
||||
This does place a burden on authors: function names, state names, and function
|
||||
arguments should be very human-readable inside state modules, since they
|
||||
directly define the user interface.
|
||||
|
||||
Cross Calling Modules
|
||||
=====================
|
||||
|
||||
As with Execution Modules State Modules can also make use of the ``__salt__``
|
||||
and ``__grains__`` data.
|
||||
|
||||
It is important to note that the real work of state management should not be
|
||||
done in the state module unless it is needed; a good example is the pkg state
|
||||
module. This module does not do any package management work, it just calls the
|
||||
pkg execution module. This makes the pkg state module completely generic, which
|
||||
is why there is only one pkg state module and many backend pkg execution
|
||||
modules.
|
||||
|
||||
On the other hand some modules will require that the logic be placed in the
|
||||
state module, a good example of this is the file module. But in the vast
|
||||
majority of cases this is not the best approach, and writing specific
|
||||
execution modules to do the backend work will be the optimal solution.
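To make the above concrete, here is a minimal sketch of a state module function. The module and the ``timezone.*`` execution-module calls are hypothetical examples, not part of this commit; what the sketch illustrates is the general shape described above: a function named after the SLS function, returning a result dictionary, and cross-calling the real work through ``__salt__`` (which the Salt loader injects into the module).

.. code-block:: python

    # Hypothetical state module, e.g. salt/states/timezone.py.
    # An SLS entry such as::
    #
    #     Europe/Berlin:
    #       timezone:
    #         - present
    #
    # maps to a call of present(name='Europe/Berlin').

    def present(name):
        '''
        Ensure the system timezone matches ``name``.
        '''
        ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

        # __salt__ is injected by the Salt loader; the heavy lifting is
        # delegated to a (hypothetical) timezone execution module.
        if __salt__['timezone.get_zone']() == name:
            ret['comment'] = 'Timezone is already set to {0}'.format(name)
            return ret

        if __salt__['timezone.set_zone'](name):
            ret['changes']['timezone'] = name
            ret['comment'] = 'Timezone set to {0}'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to set timezone to {0}'.format(name)
        return ret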
|
|
@ -26,14 +26,11 @@ http://groups.google.com/group/salt-users
|
|||
IRC
|
||||
===
|
||||
|
||||
The Salt IRC channel is hosted on the `OFTC`_ network. Connect to the OFTC
|
||||
server:
|
||||
The ``#salt`` IRC channel is hosted on the popular `Freenode`__ network. You
|
||||
can use the `Freenode webchat client`__ right from your browser.
|
||||
|
||||
irc://irc.oftc.net:6667
|
||||
|
||||
and join us in ``#salt``.
|
||||
|
||||
.. _`OFTC`: http://www.oftc.net/oftc/
|
||||
.. __: http://freenode.net/irc_servers.shtml
|
||||
.. __: http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83
|
||||
|
||||
.. _community-github:
|
||||
|
||||
|
|
|
@ -40,45 +40,29 @@ running and the Salt :term:`minions <minion>` point to the master.
|
|||
Red Hat
|
||||
=======
|
||||
|
||||
Fedora
|
||||
------
|
||||
We are working to get Salt packages into EPEL. In the meantime you can
|
||||
:command:`yum install salt-master salt-minion` via our Fedora People
|
||||
repository.
|
||||
|
||||
Salt is currently being built for Fedora. The latest koji build pages can be
|
||||
found here:
|
||||
Red Hat Enterprise Linux 5 & 6 or CentOS 5 & 6
|
||||
----------------------------------------------
|
||||
|
||||
* `Fedora 14 <https://koji.fedoraproject.org/koji/taskinfo?taskID=3358221>`_
|
||||
* `Fedora 15 <https://koji.fedoraproject.org/koji/taskinfo?taskID=3358223>`_
|
||||
* `Fedora Rawhide <https://koji.fedoraproject.org/koji/taskinfo?taskID=3358219>`_
|
||||
1. Install the `EPEL`__ repository::
|
||||
|
||||
Red Hat Enterprise Linux 6
|
||||
--------------------------
|
||||
2. Install our repository on FedoraPeople::
|
||||
|
||||
Salt is being built for EPEL6. `Browse the latest builds.
|
||||
<https://koji.fedoraproject.org/koji/taskinfo?taskID=3358215>`_
|
||||
wget -O /etc/yum.repos.d/epel-salt.repo \
|
||||
http://repos.fedorapeople.org/repos/herlo/salt/epel-salt.repo
|
||||
|
||||
The ZeroMQ packages in EPEL6 have been tested with this package, but if you
|
||||
still have issues these backports may help:
|
||||
.. __: http://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
|
||||
|
||||
* :download:`ZeroMQ backport <zeromq-2.1.7-1.el6.x86_64.rpm>`
|
||||
* :download:`pyzmq bindings backport <python-zmq-2.1.7-1.el6.src.rpm>`
|
||||
* `Package to set up EPEL repository
|
||||
<http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-5.noarch.rpm>`_
|
||||
(provided by the EPEL project)
|
||||
|
||||
Red Hat Enterprise Linux 5
|
||||
--------------------------
|
||||
Fedora 15 & 16
|
||||
--------------
|
||||
|
||||
Salt is being built for RHEL5, updates will be available soon!
|
||||
1. Install our repository on FedoraPeople::
|
||||
|
||||
Red Hat Enterprise Linux 5 requires more backports and the use of the Python
|
||||
2.6 stack provided in the EPEL repository. All of the listed packages need to
|
||||
be installed and the EPEL repository enabled to bring in the needed
|
||||
dependencies:
|
||||
|
||||
* :download:`Salt rpm <salt-0.8.9-1.el5.noarch.rpm>`
|
||||
* :download:`YAML bindings for Python 2.6 <python26-PyYAML-3.08-4.el5.x86_64.rpm>`
|
||||
* :download:`ZeroMQ backport <zeromq-2.1.7-1.el5.x86_64.rpm>`
|
||||
* :download:`pyzmq bindings backport <python26-zmq-2.1.7-1.el5.x86_64.rpm>`
|
||||
wget -O /etc/yum.repos.d/fedora-salt.repo \
|
||||
http://repos.fedorapeople.org/repos/herlo/salt/fedora-salt.repo
|
||||
|
||||
Arch Linux
|
||||
==========
|
||||
|
@ -94,46 +78,68 @@ Debian / Ubuntu
|
|||
Ubuntu
|
||||
------
|
||||
|
||||
A PPA is available until we can get packages into apt::
|
||||
We are working to get Salt into apt. In the meantime we have a PPA available
|
||||
for Lucid::
|
||||
|
||||
aptitude -y install python-software-properties
|
||||
add-apt-repository ppa:chris-lea/libpgm
|
||||
add-apt-repository ppa:chris-lea/zeromq
|
||||
add-apt-repository ppa:saltstack/salt
|
||||
aptitude update
|
||||
aptitude install salt
|
||||
|
||||
.. admonition:: Installing ZeroMQ on Ubuntu Lucid (10.04 LTS)
|
||||
|
||||
The ZeroMQ package is available starting with Maverick but there are `PPA
|
||||
packages available for Lucid`_ for both ZeroMQ and pyzmq. You will need to
|
||||
also enable the following PPAs before running the commands above::
|
||||
|
||||
add-apt-repository ppa:chris-lea/libpgm
|
||||
add-apt-repository ppa:chris-lea/zeromq
|
||||
|
||||
.. _`PPA packages available for Lucid`: https://launchpad.net/~chris-lea/+archive/zeromq
|
||||
|
||||
Debian
|
||||
------
|
||||
|
||||
`A deb package is currently in testing`__. Until that is accepted you can
|
||||
install Salt via :command:`easy_install` or :command:`pip`::
|
||||
`A deb package is currently in testing`__ for inclusion in apt. Until that is
|
||||
accepted you can install Salt by downloading the latest ``.deb`` in the
|
||||
`downloads section on GitHub`__ and installing that manually:
|
||||
|
||||
pip install salt
|
||||
.. parsed-literal::
|
||||
|
||||
dpkg -i salt-|version|.deb
|
||||
|
||||
.. __: http://mentors.debian.net/package/salt
|
||||
.. __: https://github.com/saltstack/salt/downloads
|
||||
|
||||
.. admonition:: Installing ZeroMQ on Squeeze (Debian 6)
|
||||
|
||||
ZeroMQ packages are available in squeeze-backports.
|
||||
There is a `python-zmq`__ package available in Debian "wheezy (testing)".
|
||||
If you don't have that repo enabled the best way to install Salt and pyzmq
|
||||
is by using :command:`pip` (or :command:`easy_install`)::
|
||||
|
||||
1. Add the following line to your :file:`/etc/apt/sources.list`::
|
||||
pip install pyzmq salt
|
||||
|
||||
deb http://backports.debian.org/debian-backports squeeze-backports main
|
||||
.. __: http://packages.debian.org/search?keywords=python-zmq
|
||||
|
||||
2. Run::
|
||||
Gentoo
|
||||
======
|
||||
|
||||
aptitude update
|
||||
aptitude install libzmq1 python-zmq
|
||||
Salt can be easily installed on Gentoo::
|
||||
|
||||
emerge pyyaml m2crypto pycrypto jinja pyzmq
|
||||
|
||||
Then download and install from source:
|
||||
|
||||
1. Download the latest source tarball from the GitHub downloads directory for
|
||||
the Salt project: |latest|
|
||||
|
||||
2. Untar the tarball and run the :file:`setup.py` as root:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xvf salt-|version|.tar.gz
|
||||
cd salt-|version|
|
||||
python2 setup.py install
|
||||
|
||||
FreeBSD
|
||||
=======
|
||||
|
||||
Salt is available in the FreeBSD ports tree::
|
||||
|
||||
cd /usr/ports/sysutils/salt && make install clean
|
||||
|
||||
.. seealso:: :doc:`freebsd installation guide </topics/tutorials/freebsd>`
|
||||
|
||||
Installing from source
|
||||
======================
|
||||
|
|
|
@ -127,6 +127,7 @@ set the order to last:
|
|||
pkg:
|
||||
- installed
|
||||
- order: last
|
||||
|
||||
Substantial testing has gone into the state system and it is ready for real
|
||||
world usage. A great deal has been added to the documentation for states and
|
||||
the modules and functions available to states have been cleanly documented.
|
||||
|
|
245
doc/topics/tutorials/freebsd.rst
Normal file
245
doc/topics/tutorials/freebsd.rst
Normal file
|
@ -0,0 +1,245 @@
|
|||
.. _introduction:
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
Salt was added to the FreeBSD ports tree Dec 26th, 2011 by Christer Edwards
|
||||
<christer.edwards@gmail.com>. It has been tested on FreeBSD 8.2 and 9.0RC
|
||||
releases.
|
||||
|
||||
Salt is dependent on the following additional ports. These will be installed as
|
||||
dependencies of the ``sysutils/salt`` port::
|
||||
|
||||
/devel/py-yaml
|
||||
/devel/py-pyzmq
|
||||
/devel/py-Jinja2
|
||||
/security/py-pycrypto
|
||||
/security/py-m2crypto
|
||||
|
||||
.. _installation:
|
||||
|
||||
Installation
|
||||
============
|
||||
|
||||
To install Salt from the FreeBSD ports tree, use the command::
|
||||
|
||||
cd /usr/ports/sysutils/salt && make install clean
|
||||
|
||||
Once the port is installed you'll need to make a few configuration changes.
|
||||
These include defining the IP to bind to (optional), and some configuration
|
||||
path changes to make salt fit more natively into the FreeBSD filesystem tree.
|
||||
|
||||
.. _configuration:
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
In the sections below I'll outline configuration options for both the Salt
|
||||
Master and Salt Minions.
|
||||
|
||||
The Salt port installs two sample configuration files, salt/master.sample and
|
||||
salt/minion.sample (these should be installed in /usr/local/etc/, unless you use a
|
||||
different %%PREFIX%%). You'll need to copy these .sample files into place and
|
||||
make a few edits. First, copy them into place as seen here::
|
||||
|
||||
cp /usr/local/etc/salt/master.sample /usr/local/etc/salt/master
|
||||
cp /usr/local/etc/salt/minion.sample /usr/local/etc/salt/minion
|
||||
|
||||
Note: You'll only need to copy the config for the service you're going to run.
|
||||
|
||||
Once you've copied the config into place you'll need to make changes specific
|
||||
to your setup. Below I'll outline suggested configuration changes to the
|
||||
Master, after which I'll outline configuring the Minion.
|
||||
|
||||
.. _master_configuration:
|
||||
|
||||
Master Configuration
|
||||
====================
|
||||
|
||||
This section outlines configuration of a Salt Master, which is used to control
|
||||
other machines known as "minions" (see "Minion Configuration" for instructions
|
||||
on configuring a minion). This will outline IP configuration, and a few key
|
||||
configuration paths.
|
||||
|
||||
**Interface**
|
||||
|
||||
By default the Salt master listens on ports 4505 and 4506 on all interfaces
|
||||
(0.0.0.0). If you have a need to bind Salt to a specific IP, redefine the
|
||||
"interface" directive as seen here::
|
||||
|
||||
- #interface: 0.0.0.0
|
||||
+ interface: 10.0.0.1
|
||||
|
||||
**pki_dir**
|
||||
|
||||
Salt is primarily developed on Linux, and as such carries some Linux-isms in
|
||||
its development and configuration. These are all very easily remedied to more
|
||||
seamlessly fit into FreeBSD. One such configuration option is the *pki_dir:*
|
||||
directive. To ensure all of Salt's files end up where you expect, you'll likely
|
||||
want to update this line as seen here::
|
||||
|
||||
- #pki_dir: /etc/salt/pki
|
||||
+ pki_dir: /usr/local/etc/salt/pki
|
||||
|
||||
**file_roots**
|
||||
|
||||
Finally, if you plan on using Salt's state-management features, you'll want to
|
||||
update the *file_roots:* directive. This directive defines the location of the
|
||||
state files. I suggest updating this directive as seen here::
|
||||
|
||||
- #file_roots:
|
||||
- # base:
|
||||
- # - /srv/salt
|
||||
+ file_roots:
|
||||
+ base:
|
||||
+ - /usr/local/etc/salt/states
|
||||
|
||||
**rc.conf**
|
||||
|
||||
Last but not least you'll need to activate the Salt Master in your rc.conf
|
||||
file. Using your favorite editor, open /etc/rc.conf or /etc/rc.conf.local and
|
||||
add this line::
|
||||
|
||||
+ salt_master_enable="YES"
|
||||
|
||||
Once you've completed all of these steps you're ready to start your Salt
|
||||
Master. The Salt port installs an rc script which should be used to manage your
|
||||
Salt Master. You should be able to start your Salt Master now using the command
|
||||
seen here::
|
||||
|
||||
service salt_master start
|
||||
|
||||
If your Salt Master doesn't start successfully, go back through each step and
|
||||
see if anything was missed. Salt doesn't take much configuration (part of its
|
||||
beauty!), and errors are usually simple mistakes.
|
||||
|
||||
.. _minion_configuration:
|
||||
|
||||
Minion Configuration
|
||||
====================
|
||||
|
||||
Configuring a Salt Minion is surprisingly simple. Unless you have a real need
|
||||
for customizing your minion configuration (for which there are plenty of options if
|
||||
you are so inclined!), there is one simple directive that needs to be updated.
|
||||
That option is the location of the master.
|
||||
|
||||
By default a Salt Minion will try to connect to the DNS name "salt". If you
|
||||
have the ability to update DNS records for your domain you might create an A or
|
||||
CNAME record for "salt" that points to your Salt Master. If you are able to do
|
||||
this you likely can do without any minion configuration at all.
|
||||
|
||||
If you are not able to update DNS, you'll simply need to update one entry in
|
||||
the configuration file. Using your favorite editor, open the minion
|
||||
configuration file and update the "master" entry as seen here::
|
||||
|
||||
- #master: salt
|
||||
+ master: 10.0.0.1
|
||||
|
||||
Simply update the master directive to the IP or hostname of your Salt Master.
|
||||
Save your changes and you're ready to start your Salt Minion. Advanced
|
||||
configuration options are covered in another chapter.
|
||||
|
||||
**pki_dir**
|
||||
|
||||
Salt is primarily developed on Linux, and as such carries some Linux-isms in
|
||||
its development and configuration. These are all very easily remedied to more
|
||||
seamlessly fit into FreeBSD. One such configuration option is the *pki_dir:*
|
||||
directive. To ensure all of Salt's files end up where you expect, you'll likely
|
||||
want to update this line as seen here::
|
||||
|
||||
- #pki_dir: /etc/salt/pki
|
||||
+ pki_dir: /usr/local/etc/salt/pki
|
||||
|
||||
**rc.conf**
|
||||
|
||||
Before you're able to start the Salt Minion you'll need to update your rc.conf
|
||||
file. Using your favorite editor open /etc/rc.conf or /etc/rc.conf.local and
|
||||
add this line::
|
||||
|
||||
+ salt_minion_enable="YES"
|
||||
|
||||
Once you've completed all of these steps you're ready to start your Salt
|
||||
Minion. The Salt port installs an rc script which should be used to manage your
|
||||
Salt Minion. You should be able to start your Salt Minion now using the command
|
||||
seen here::
|
||||
|
||||
service salt_minion start
|
||||
|
||||
If your Salt Minion doesn't start successfully, go back through each step and
|
||||
see if anything was missed. Salt doesn't take much configuration (part of its
|
||||
beauty!), and errors are usually simple mistakes.
|
||||
|
||||
.. _tying_it_all_together:
|
||||
|
||||
Tying It All Together
|
||||
======================
|
||||
|
||||
If you've successfully completed each of the steps above you should have a
|
||||
running Salt Master and a running Salt Minion. The Minion should be configured
|
||||
to point to the Master. To verify that there is communication flowing between
|
||||
the Minion and Master we'll run a few initial ``salt`` commands. These commands
|
||||
will validate the Minion's RSA encryption key, and then send a test command to
|
||||
the Minion to ensure that commands and responses are flowing as expected.
|
||||
|
||||
**Key Management**
|
||||
|
||||
Salt uses AES encryption for all communication between the Master and the
|
||||
Minion. This ensures that the commands you send to your Minions (your cloud)
|
||||
can not be tampered with, and that communication between Master and Minion is
|
||||
only done through trusted, accepted keys.
|
||||
|
||||
Before you'll be able to do any remote execution or state management you'll
|
||||
need to accept any pending keys on the Master. Run the ``salt-key`` command to
|
||||
list the keys known to the Salt Master::
|
||||
|
||||
[root@master ~]# salt-key -L
|
||||
Unaccepted Keys:
|
||||
avon
|
||||
bodie
|
||||
bubbles
|
||||
marlo
|
||||
Accepted Keys:
|
||||
|
||||
This example shows that the Salt Master is aware of four Minions, but none of
|
||||
the keys have been accepted. To accept the keys and allow the Minions to be
|
||||
controlled by the Master, again use the ``salt-key`` command::
|
||||
|
||||
[root@master ~]# salt-key -A
|
||||
[root@master ~]# salt-key -L
|
||||
Unaccepted Keys:
|
||||
Accepted Keys:
|
||||
avon
|
||||
bodie
|
||||
bubbles
|
||||
marlo
|
||||
|
||||
The ``salt-key`` command allows for signing keys individually or in bulk. The
|
||||
example above, using ``-A`` bulk-accepts all pending keys. To accept keys
|
||||
individually use the lowercase of the same option, ``-a keyname``.
|
||||
|
||||
.. _sending_commands:
|
||||
|
||||
Sending Commands
|
||||
================
|
||||
|
||||
Everything should be set for you to begin remote management of your Minions.
|
||||
Whether you have a few or a few-dozen, Salt can help you manage them easily!
|
||||
|
||||
For final verification, send a test function from your Salt Master to your
|
||||
minions. If all of your minions are properly communicating with your Master,
|
||||
you should "True" responses from each of them. See the example below to send
|
||||
the ``test.ping`` remote command::
|
||||
|
||||
[root@avon ~]# salt '*' test.ping
|
||||
{'avon': True}
|
||||
|
||||
.. _where_do_i_go_from_here:
|
||||
|
||||
Where Do I Go From Here
|
||||
========================
|
||||
|
||||
Congratulations! You've successfully configured your first Salt Minions and are
|
||||
able to send remote commands. I'm sure you're eager to learn more about what
|
||||
Salt can do. Depending on the primary way you want to manage your machines you
|
||||
may either want to visit the section regarding Salt States, or the section on
|
||||
Modules.
|
457
doc/topics/tutorials/starting_states.rst
Normal file
457
doc/topics/tutorials/starting_states.rst
Normal file
|
@ -0,0 +1,457 @@
|
|||
=========================
|
||||
How Do I Use Salt States?
|
||||
=========================
|
||||
|
||||
Simplicity, Simplicity, Simplicity
|
||||
|
||||
Many of the most powerful and useful engineering solutions are founded on
|
||||
simple principles; the Salt SLS system strives to do just that.
|
||||
|
||||
The core of the Salt State system is the SLS, or the SaLt State file. The SLS
|
||||
is a representation of the state a system should be in, and is set up
|
||||
to contain this data in the most simple way possible.
|
||||
|
||||
It is All Just Data
|
||||
===================
|
||||
|
||||
Before delving into the particulars, it will help to understand that the SLS
|
||||
is just a data structure under the hood. While understanding that the SLS is
|
||||
just a data structure is not critical to making use of Salt States,
|
||||
it should help bolster the understanding of where the real power is.
|
||||
|
||||
SLS files are therefore, in reality, just dictionaries, lists, strings, and
|
||||
numbers. By using this approach Salt can be much more flexible, and as someone
|
||||
writes more SLS files it becomes clear exactly what is being written. The result
|
||||
is a system that is easy to understand, yet grows with the needs of the admin
|
||||
or developer, offering simple constructs that grow to encompass the most
|
||||
complicated needs.
|
||||
|
||||
In the section titled "State Data Structures" a reference exists, explaining
|
||||
in depth how the data is laid out.
|
||||
|
||||
Default Data - YAML
|
||||
===================
|
||||
|
||||
By default Salt represents the SLS data in what is one of the simplest
|
||||
serialization formats available - YAML.
|
||||
|
||||
A typical, small SLS file will often look like this in YAML:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
apache:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- require:
|
||||
- pkg: apache
|
||||
|
||||
This SLS data will ensure that the package named apache is installed, and
|
||||
that the apache service is running. The components can be explained in a
|
||||
simple way.
|
||||
|
||||
The first line is the ID for a set of data, and it is called the ID
|
||||
Declaration. This ID sets the name of the thing that needs to be manipulated.
|
||||
|
||||
The second and fourth lines are the start of the State Declarations, so they
|
||||
are using the pkg and service states respectively. The pkg state manages a
|
||||
software package to get installed via the system's native package manager,
|
||||
and the service state manages a system daemon. Below the pkg and service
|
||||
lines are the functions to run. These functions define what state the named
|
||||
package and service should be in. Here the package is to be installed, and
|
||||
the service should be running.
|
||||
|
||||
Finally, on line 6, is the word ``require``, this is called a Requisite
|
||||
Statement, and it makes sure that the apache service is only started after
|
||||
the successful installation of the apache package.
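To connect this back to the "It is All Just Data" section above, the following sketch shows roughly the plain data structure the YAML example turns into once rendered; it is an illustration of the rendered form, not a description of Salt's internal object model.

.. code-block:: python

    # The YAML SLS above, once rendered, is nothing more than this plain
    # Python data structure: ordinary dictionaries, lists, and strings.
    sls_data = {
        'apache': {
            'pkg': ['installed'],
            'service': [
                'running',
                {'require': [{'pkg': 'apache'}]},
            ],
        },
    }

    # The ID declaration is the outer key, the state declarations are the
    # 'pkg' and 'service' keys, and the function to run plus its options
    # are the items in each list.
    print(sls_data)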
|
||||
|
||||
Adding Configs and Users
|
||||
========================
|
||||
|
||||
When setting up a service like an apache server many more components may
|
||||
need to be added. The apache configuration file will most likely be managed,
|
||||
and a user and group may need to be set up.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
apache:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- watch:
|
||||
- pkg: apache
|
||||
- file: /etc/httpd/conf/httpd.conf
|
||||
- user: apache
|
||||
user:
|
||||
- present
|
||||
- uid: 87
|
||||
- gid: 87
|
||||
- home: /var/www/html
|
||||
- shell: /bin/nologin
|
||||
- require:
|
||||
- group: apache
|
||||
group:
|
||||
- present
|
||||
- gid: 87
|
||||
- require:
|
||||
- pkg: apache
|
||||
|
||||
/etc/httpd/conf/httpd.conf:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://apache/httpd.conf
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
This SLS data greatly extends the first example, and includes a config file,
|
||||
a user, a group and new requisite statement: ``watch``.
|
||||
|
||||
Adding more states is easy. Since the new user and group states are under
|
||||
the apache ID, the user and group will be the apache user and group. The
|
||||
``require`` statements will make sure that the user will only be made after
|
||||
the group, and that the group will be made only after the apache package is
|
||||
installed.
|
||||
|
||||
Next, the ``require`` statement under service was changed to ``watch``, and is
|
||||
now watching 3 states instead of just one. The watch statement does the same
|
||||
thing as require, making sure that the other states run before running the
|
||||
state with a watch, but it adds an extra component. The ``watch`` statement
|
||||
will run the state's watcher function if any of the watched states changed
|
||||
anything. So if the package was updated, the config file changed, or the user
|
||||
uid was modified, then the service state's watcher will be run. The service
|
||||
state's watcher just restarts the service, so in this case, a change in the
|
||||
config file will also trigger a restart of the respective service.
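As a rough sketch of what "the service state's watcher" boils down to: the hook name ``mod_watch`` and the exact return keys below are assumptions for illustration, while the restart behaviour is taken from the description above.

.. code-block:: python

    # Illustrative watcher hook for a service state module.  The state
    # system calls this hook when any watched state reports changes; for
    # a service that simply means restarting it.
    def mod_watch(name):
        ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
        # __salt__ is injected by the Salt loader.
        if __salt__['service.restart'](name):
            ret['changes'][name] = 'restarted'
            ret['comment'] = 'Service {0} restarted'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to restart service {0}'.format(name)
        return ret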
|
||||
|
||||
Moving Beyond a Single SLS
|
||||
==========================
|
||||
|
||||
When setting up Salt States, more than one SLS file will usually be needed. The above
|
||||
examples were just in a single SLS file, but more than one SLS file can be
|
||||
combined to build out a State Tree. The above example also references a file
|
||||
with an unfamiliar source, salt://apache/httpd.conf; that file will need to be
|
||||
available as well.
|
||||
|
||||
The SLS files are laid out in a directory on the salt master. Files are laid
|
||||
out as just files, an sls is just a file and files to download are just files.
|
||||
|
||||
The apache example would be laid out in the root of the salt file server like
|
||||
this:
|
||||
|
||||
/apache/init.sls
|
||||
/apache/httpd.conf
|
||||
|
||||
So the httpd.conf is just a file in the apache directory, and is referenced
|
||||
directly.
|
||||
|
||||
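Assuming the default file root of ``/srv/salt`` (the ``base`` entry of
``file_roots`` set later in this commit's ``salt/config.py``), the layout
above maps to the file server like this (an illustrative sketch, not part of
the original tutorial):

/srv/salt/apache/init.sls   - the SLS referenced as ``apache``
/srv/salt/apache/httpd.conf - the file referenced as ``salt://apache/httpd.conf``
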
But with more than a single SLS file, more components can be added to the
toolkit. Consider this ssh example:

/ssh/init.sls

.. code-block:: yaml

    openssh-client:
      pkg:
        - installed

    /etc/ssh/ssh_config:
      file:
        - managed
        - user: root
        - group: root
        - mode: 644
        - source: salt://ssh/ssh_config
        - require:
          - pkg: openssh-client

/ssh/server.sls

.. code-block:: yaml

    include:
      - ssh

    openssh-server:
      pkg:
        - installed

    sshd:
      service:
        - running
        - require:
          - pkg: openssh-client
          - pkg: openssh-server
          - file: /etc/ssh/banner
          - file: /etc/ssh/sshd_config

    /etc/ssh/sshd_config:
      file:
        - managed
        - user: root
        - group: root
        - mode: 644
        - source: salt://ssh/sshd_config
        - require:
          - pkg: openssh-server

    /etc/ssh/banner:
      file:
        - managed
        - user: root
        - group: root
        - mode: 644
        - source: salt://ssh/banner
        - require:
          - pkg: openssh-server

Now our State Tree looks like this:

/apache/init.sls
/apache/httpd.conf
/ssh/init.sls
/ssh/server.sls
/ssh/banner
/ssh/ssh_config
/ssh/sshd_config

This example now introduces the ``include`` statement. The include statement
includes another SLS file so that components found in it can be required,
watched or, as will soon be demonstrated, extended.

The include statement allows for states to be cross-linked. When an SLS
has an include statement it is literally extended to include the contents of
the included SLS files.

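As a rough sketch of what that means for the files above (illustrative only,
not literal renderer output), ssh/server.sls is evaluated as if the data from
ssh/init.sls were written inline alongside its own states:

.. code-block:: yaml

    # effective data for ssh.server after the include is processed (sketch)
    openssh-client:       # pulled in from ssh/init.sls
      pkg:
        - installed

    openssh-server:       # defined directly in ssh/server.sls
      pkg:
        - installed

    # ...plus the sshd service and the file states shown above
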
Extending Included SLS Data
===========================

Sometimes SLS data needs to be extended. Perhaps the apache service needs to
watch additional resources, or under certain circumstances a different file
needs to be placed.

These examples will add more watchers to apache and change the ssh banner.

/ssh/custom-server.sls

.. code-block:: yaml

    include:
      - ssh.server

    extend:
      /etc/ssh/banner:
        file:
          - source: salt://ssh/custom-banner

/python/mod_python.sls

.. code-block:: yaml

    include:
      - apache

    extend:
      apache:
        service:
          - watch:
            - pkg: mod_python

    mod_python:
      pkg:
        - installed

The custom-server.sls file uses the extend statement to overwrite where the
banner is downloaded from, and therefore changes which file is used to
configure the banner.

In the new mod_python SLS the mod_python package is added, but more importantly
the apache service was extended to also watch the mod_python package.

There is a bit of a trick here: in the extend statement, Requisite Statements
are extended, so the ``- pkg: mod_python`` is appended to the watch list, but
all other statements are overwritten.

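Concretely, here is a sketch of the merged result (not literal output from
Salt): after mod_python.sls is applied, the apache service's watch list from
the earlier example has the new entry appended to it.

.. code-block:: yaml

    apache:
      service:
        - running
        - watch:
          - pkg: apache
          - file: /etc/httpd/conf/httpd.conf
          - user: apache
          - pkg: mod_python    # appended by the extend statement
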
Understanding the Render System
===============================

Since the SLS data is just plain old data, it does not need to be represented
with YAML. Salt defaults to YAML because it is very straightforward and easy
to learn and use. But the SLS files can be rendered from almost any imaginable
medium, so long as a renderer module is provided.

The default rendering system is the ``yaml_jinja`` renderer. The
``yaml_jinja`` renderer will first pass the template through the jinja
templating system, and then through the YAML parser. The benefit here is that
full programming constructs are available when creating SLS files.

Other renderers available are ``yaml_mako``, which uses the mako templating
system rather than the jinja templating system, and, more notably, the pure
Python or ``py`` renderer. The ``py`` renderer allows SLS files to be
written in pure Python, allowing for the utmost level of flexibility and
power when preparing SLS data.

Getting to Know the Default - yaml_jinja
----------------------------------------

The default renderer, ``yaml_jinja``, allows for the use of the jinja
templating system. A guide to the jinja templating system can be found here:
<link to the jinja templating docs page>.

When working with renderers a few very useful bits of data are passed in. In
the case of templating engine based renderers, two critical components are
available: ``salt`` and ``grains``. The salt object allows for any salt
function to be called from within the template, and grains allows for the
grains to be accessed from within the template. A few examples are in order:

/apache/init.sls

.. code-block:: yaml

    apache:
      pkg:
        - installed
        {% if grains['os'] == 'RedHat' %}
        - name: httpd
        {% endif %}
      service:
        - running
        {% if grains['os'] == 'RedHat' %}
        - name: httpd
        {% endif %}
        - watch:
          - pkg: apache
          - file: /etc/httpd/conf/httpd.conf
          - user: apache
      user:
        - present
        - uid: 87
        - gid: 87
        - home: /var/www/html
        - shell: /bin/nologin
        - require:
          - group: apache
      group:
        - present
        - gid: 87
        - require:
          - pkg: apache

    /etc/httpd/conf/httpd.conf:
      file:
        - managed
        - source: salt://apache/httpd.conf
        - user: root
        - group: root
        - mode: 644

This example is simple: if the os grain states that the operating system is
Red Hat, then the name of the apache package and service needs to be httpd.

A more aggressive way to use Jinja can be found here, in a module to set up
a MooseFS distributed filesystem chunkserver:

/moosefs/chunk.sls

.. code-block:: yaml

    include:
      - moosefs

    {% for mnt in salt['cmd.run']('ls /dev/data/moose*').split() %}
    /mnt/moose{{ mnt[-1] }}:
      mount:
        - mounted
        - device: {{ mnt }}
        - fstype: xfs
        - mkmnt: True
      file:
        - directory
        - user: mfs
        - group: mfs
        - require:
          - user: mfs
          - group: mfs
    {% endfor %}

    '/etc/mfshdd.cfg':
      file:
        - managed
        - source: salt://moosefs/mfshdd.cfg
        - user: root
        - group: root
        - mode: 644
        - template: jinja
        - require:
          - pkg: mfs-chunkserver

    '/etc/mfschunkserver.cfg':
      file:
        - managed
        - source: salt://moosefs/mfschunkserver.cfg
        - user: root
        - group: root
        - mode: 644
        - template: jinja
        - require:
          - pkg: mfs-chunkserver

    mfs-chunkserver:
      pkg:
        - installed
    mfschunkserver:
      service:
        - running
        - require:
          {% for mnt in salt['cmd.run']('ls /dev/data/moose*').split() %}
          - mount: /mnt/moose{{ mnt[-1] }}
          - file: /mnt/moose{{ mnt[-1] }}
          {% endfor %}
          - file: /etc/mfschunkserver.cfg
          - file: /etc/mfshdd.cfg
          - file: /var/lib/mfs

This example shows much more of the available power provided by Jinja.
Multiple for loops are used to dynamically detect available hard drives
and set them up to be mounted, and the ``salt`` object is used multiple
times to call shell commands to gather data.

Introducing the Python Renderer
-------------------------------

Sometimes the chosen default renderer might not have enough logical power to
accomplish the needed task. When this happens, the python renderer can be
used. Normally a yaml renderer should be used for the majority of SLS files,
but an SLS file set to use another renderer can easily be added to the tree.

This example shows a very basic python SLS file:

/python/django.sls

.. code-block:: python

    #!py

    def run():
        '''
        Install the django package
        '''
        return {'include': ['python'],
                'django': {'pkg': ['installed']}}

This is a very simple example; the first line is an SLS shebang line that
tells Salt not to use the default renderer, but to use the ``py`` renderer.
Then the run function is defined; the return value from the run function
must be a Salt-friendly data structure, better known as a Salt
``HighState`` data structure.

This python example would look like this if it were written in YAML:

.. code-block:: yaml

    include:
      - python

    django:
      pkg:
        - installed

This clearly illustrates that not only is using the YAML renderer a wise
decision as the default, but that unbridled power can be obtained where
needed by using a pure Python SLS.

@ -119,7 +119,7 @@ Last, call :func:`state.highstate <salt.modules.state.highstate>` again and the
minion will fetch and execute the highstate as well as our HTML file from the
master using Salt's File Server::

salt '*' salt.highstate
salt '*' state.highstate

Verify that Apache is now serving your custom HTML.

@ -19,12 +19,14 @@ backup=('etc/salt/master'
|
|||
makedepends=()
|
||||
optdepends=()
|
||||
options=()
|
||||
source=("https://github.com/downloads/thatch45/salt/$pkgname-$pkgver.tar.gz"
|
||||
source=("https://github.com/downloads/saltstack/salt/$pkgname-$pkgver.tar.gz"
|
||||
"salt-master"
|
||||
"salt-syndic"
|
||||
"salt-minion")
|
||||
md5sums=('26456860e89f53deaf75193da50b449a'
|
||||
'4baf45d1610d771b742de2cbd8951b9f')
|
||||
md5sums=('c27837bac06dadfdb51b4a2b63fe6d35'
|
||||
'1594591acb0a266854186a694da21103'
|
||||
'09683ef4966e401761f7d2db6ad4b692'
|
||||
'21ab2eac231e9f61bf002ba5f16f8a3d')
|
||||
|
||||
package() {
|
||||
cd $srcdir/$pkgname-$pkgver
|
||||
|
@ -32,8 +34,8 @@ package() {
|
|||
python2 setup.py install --root=$pkgdir/ --optimize=1
|
||||
|
||||
mkdir -p $pkgdir/etc/rc.d/
|
||||
cp $srcdir/pkg/arch/salt-master $pkgdir/etc/rc.d/
|
||||
cp $srcdir/pkg/arch/salt-minion $pkgdir/etc/rc.d/
|
||||
cp $srcdir/pkg/arch/salt-syndic $pkgdir/etc/rc.d/
|
||||
cp $srcdir/salt-master $pkgdir/etc/rc.d/
|
||||
cp $srcdir/salt-minion $pkgdir/etc/rc.d/
|
||||
cp $srcdir/salt-syndic $pkgdir/etc/rc.d/
|
||||
chmod +x $pkgdir/etc/rc.d/*
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
# Maintainer: Thomas S Hatch <thatch45 at gmail dot com>
|
||||
pkgname=salt
|
||||
pkgname=salt-git
|
||||
pkgver=$(date +%Y%m%d)
|
||||
pkgrel=1
|
||||
pkgdesc="A remote execution and communication system built on zeromq"
|
||||
arch=('any')
|
||||
url="https://github.com/thatch45/salt"
|
||||
url="https://github.com/saltstack/salt"
|
||||
license=('APACHE')
|
||||
groups=()
|
||||
depends=('python2'
|
||||
|
@ -16,9 +16,10 @@ depends=('python2'
|
|||
'python2-jinja')
|
||||
makedepends=('git')
|
||||
provides=()
|
||||
backup=('etc/salt/master'
|
||||
backup=('etc/salt/master'
|
||||
'etc/salt/minion')
|
||||
options=()
|
||||
conflicts=('salt')
|
||||
source=("salt-master"
|
||||
"salt-minion"
|
||||
"salt-syndic")
|
||||
|
@ -52,10 +53,10 @@ package() {
|
|||
cd "$srcdir/$_gitname-build"
|
||||
|
||||
python2 setup.py install --root=$pkgdir/ --optimize=1
|
||||
|
||||
|
||||
mkdir -p $pkgdir/etc/rc.d/
|
||||
cp $srcdir/salt-master $pkgdir/etc/rc.d/
|
||||
cp $srcdir/salt-minion $pkgdir/etc/rc.d/
|
||||
cp $srcdir/salt-syndic $pkgdir/etc/rc.d/
|
||||
chmod +x $pkgdir/etc/rc.d/*
|
||||
}
|
||||
}
|
||||
|
|
10
pkg/rpm/salt-master.service
Normal file
|
@ -0,0 +1,10 @@
|
|||
[Unit]
|
||||
Description=The Salt Master Server
|
||||
After=syslog.target network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/salt-master
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
10
pkg/rpm/salt-minion.service
Normal file
|
@ -0,0 +1,10 @@
|
|||
[Unit]
|
||||
Description=The Salt Minion
|
||||
After=syslog.target network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/salt-minion
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
10
pkg/rpm/salt-syndic.service
Normal file
|
@ -0,0 +1,10 @@
|
|||
[Unit]
|
||||
Description=The Salt Master Server
|
||||
After=syslog.target network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/salt-syndic
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -1,18 +1,21 @@
|
|||
'''
|
||||
Make me some salt!
|
||||
'''
|
||||
|
||||
__version_info__ = (0, 9, 4)
|
||||
__version__ = '.'.join(map(str, __version_info__))
|
||||
from salt.version import __version__
|
||||
|
||||
# Import python libs
|
||||
import optparse
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Import salt libs
|
||||
import salt.config
|
||||
|
||||
# Import salt libs, the try block bypasses an issue at build time so that c
|
||||
# modules don't cause the build to fail
|
||||
try:
|
||||
import salt.config
|
||||
import salt.utils.verify
|
||||
except ImportError as e:
|
||||
if e.message != 'No module named _msgpack':
|
||||
raise
|
||||
|
||||
def verify_env(dirs):
|
||||
'''
|
||||
|
@ -25,6 +28,8 @@ def verify_env(dirs):
|
|||
os.makedirs(dir_)
|
||||
except OSError, e:
|
||||
print 'Failed to create directory path "%s" - %s' % (dir_, e)
|
||||
# Run the extra verification checks
|
||||
salt.utils.verify.run()
|
||||
|
||||
|
||||
class Master(object):
|
||||
|
@ -87,6 +92,7 @@ class Master(object):
|
|||
for name, level in self.opts['log_granular_levels'].iteritems():
|
||||
salt.log.set_logger_level(name, level)
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
# Late import so logging works correctly
|
||||
import salt.master
|
||||
master = salt.master.Master(self.opts)
|
||||
|
@ -142,8 +148,10 @@ class Minion(object):
|
|||
'''
|
||||
Execute this method to start up a minion.
|
||||
'''
|
||||
verify_env([self.opts['pki_dir'], self.opts['cachedir'],
|
||||
os.path.dirname(self.opts['log_file']),
|
||||
verify_env([self.opts['pki_dir'],
|
||||
self.opts['cachedir'],
|
||||
self.opts['extension_modules'],
|
||||
os.path.dirname(self.opts['log_file']),
|
||||
])
|
||||
import salt.log
|
||||
salt.log.setup_logfile_logger(
|
||||
|
@ -156,12 +164,17 @@ class Minion(object):
|
|||
|
||||
# Late import so logging works correctly
|
||||
import salt.minion
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
minion = salt.minion.Minion(self.opts)
|
||||
minion.tune_in()
|
||||
log = logging.getLogger(__name__)
|
||||
try:
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
minion = salt.minion.Minion(self.opts)
|
||||
minion.tune_in()
|
||||
except KeyboardInterrupt:
|
||||
log.warn('Stopping the Salt Minion')
|
||||
raise SystemExit('\nExiting on Ctrl-c')
|
||||
|
||||
|
||||
class Syndic(object):
|
||||
|
@ -253,9 +266,14 @@ class Syndic(object):
|
|||
|
||||
# Late import so logging works correctly
|
||||
import salt.minion
|
||||
syndic = salt.minion.Syndic(self.opts)
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
syndic.tune_in()
|
||||
log = logging.getLogger(__name__)
|
||||
try:
|
||||
syndic = salt.minion.Syndic(self.opts)
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
syndic.tune_in()
|
||||
except KeyboardInterrupt:
|
||||
log.warn('Stopping the Salt Syndic Minion')
|
||||
raise SystemExit('\nExiting on Ctrl-c')
|
||||
|
|
|
@ -7,12 +7,7 @@ import optparse
|
|||
import os
|
||||
import sys
|
||||
import yaml
|
||||
JSON = False
|
||||
try:
|
||||
import json
|
||||
JSON = True
|
||||
except:
|
||||
pass
|
||||
import json
|
||||
|
||||
# Import salt components
|
||||
import salt.cli.caller
|
||||
|
@ -23,6 +18,7 @@ import salt.output
|
|||
import salt.runner
|
||||
|
||||
from salt import __version__ as VERSION
|
||||
from salt.exceptions import SaltInvocationError
|
||||
|
||||
|
||||
class SaltCMD(object):
|
||||
|
@ -79,6 +75,26 @@ class SaltCMD(object):
|
|||
action='store_true',
|
||||
help=('Instead of using shell globs use the return code '
|
||||
'of a function.'))
|
||||
parser.add_option('-N',
|
||||
'--nodegroup',
|
||||
default=False,
|
||||
dest='nodegroup',
|
||||
action='store_true',
|
||||
help=('Instead of using shell globs to evaluate the target '
|
||||
'use one of the predefined nodegroups to identify a '
|
||||
'list of targets.'))
|
||||
parser.add_option('-C',
|
||||
'--compound',
|
||||
default=False,
|
||||
dest='compound',
|
||||
action='store_true',
|
||||
help=('The compound target option allows for multiple '
|
||||
'target types to be evaluated, allowing for greater '
|
||||
'granularity in target matching. The compound target '
|
||||
'is space delimited, targets other than globs are '
|
||||
'preceted with an identifyer matching the specific '
|
||||
'targets argument type: salt \'G@os:RedHat and '
|
||||
'webser* or E@database.*\''))
|
||||
parser.add_option('--return',
|
||||
default='',
|
||||
dest='return_',
|
||||
|
@ -118,12 +134,11 @@ class SaltCMD(object):
|
|||
action='store_true',
|
||||
dest='yaml_out',
|
||||
help='Print the output from the salt command in yaml.')
|
||||
if JSON:
|
||||
parser.add_option('--json-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='json_out',
|
||||
help='Print the output from the salt command in json.')
|
||||
parser.add_option('--json-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='json_out',
|
||||
help='Print the output from the salt command in json.')
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
|
@ -134,15 +149,14 @@ class SaltCMD(object):
|
|||
opts['list'] = options.list_
|
||||
opts['grain'] = options.grain
|
||||
opts['exsel'] = options.exsel
|
||||
opts['nodegroup'] = options.nodegroup
|
||||
opts['compound'] = options.compound
|
||||
opts['return'] = options.return_
|
||||
opts['conf_file'] = options.conf_file
|
||||
opts['raw_out'] = options.raw_out
|
||||
opts['txt_out'] = options.txt_out
|
||||
opts['yaml_out'] = options.yaml_out
|
||||
if JSON:
|
||||
opts['json_out'] = options.json_out
|
||||
else:
|
||||
opts['json_out'] = False
|
||||
opts['json_out'] = options.json_out
|
||||
|
||||
if opts['return']:
|
||||
if opts['timeout'] == 5:
|
||||
|
@ -189,7 +203,26 @@ class SaltCMD(object):
|
|||
'''
|
||||
local = salt.client.LocalClient(self.opts['conf_file'])
|
||||
if 'query' in self.opts:
|
||||
print local.find_cmd(self.opts['cmd'])
|
||||
ret = local.find_cmd(self.opts['cmd'])
|
||||
for jid in ret:
|
||||
if isinstance(ret, list) or isinstance(ret, dict):
|
||||
# Determine the proper output method and run it
|
||||
get_outputter = salt.output.get_outputter
|
||||
if self.opts['raw_out']:
|
||||
printout = get_outputter('raw')
|
||||
elif self.opts['json_out']:
|
||||
printout = get_outputter('json')
|
||||
elif self.opts['txt_out']:
|
||||
printout = get_outputter('txt')
|
||||
elif self.opts['yaml_out']:
|
||||
printout = get_outputter('yaml')
|
||||
else:
|
||||
printout = get_outputter(None)
|
||||
|
||||
print 'Return data for job {0}:'.format(jid)
|
||||
printout(ret[jid])
|
||||
print ''
|
||||
|
||||
else:
|
||||
args = [self.opts['tgt'],
|
||||
self.opts['fun'],
|
||||
|
@ -204,21 +237,29 @@ class SaltCMD(object):
|
|||
args.append('grain')
|
||||
elif self.opts['exsel']:
|
||||
args.append('exsel')
|
||||
elif self.opts['nodegroup']:
|
||||
args.append('nodegroup')
|
||||
elif self.opts['compound']:
|
||||
args.append('compound')
|
||||
else:
|
||||
args.append('glob')
|
||||
|
||||
if self.opts['return']:
|
||||
args.append(self.opts['return'])
|
||||
full_ret = local.cmd_full_return(*args)
|
||||
ret, out = self._format_ret(full_ret)
|
||||
try:
|
||||
full_ret = local.cmd_full_return(*args)
|
||||
ret, out = self._format_ret(full_ret)
|
||||
except SaltInvocationError as exc:
|
||||
ret = exc
|
||||
out = ''
|
||||
|
||||
# Handle special case commands
|
||||
if self.opts['fun'] == 'sys.doc':
|
||||
self._print_docs(ret)
|
||||
else:
|
||||
# Determine the proper output method and run it
|
||||
get_outputter = salt.output.get_outputter
|
||||
if isinstance(ret, list) or isinstance(ret, dict):
|
||||
# Determine the proper output method and run it
|
||||
get_outputter = salt.output.get_outputter
|
||||
if self.opts['raw_out']:
|
||||
printout = get_outputter('raw')
|
||||
elif self.opts['json_out']:
|
||||
|
@ -231,8 +272,14 @@ class SaltCMD(object):
|
|||
printout = get_outputter(out)
|
||||
else:
|
||||
printout = get_outputter(None)
|
||||
elif isinstance(ret, SaltInvocationError):
|
||||
# Pretty print invocation errors
|
||||
printout = get_outputter("txt")
|
||||
printout(ret)
|
||||
|
||||
printout(ret)
|
||||
# Always exit with a return code of 1 on issues
|
||||
if isinstance(ret, Exception):
|
||||
sys.exit(1)
|
||||
|
||||
def _format_ret(self, full_ret):
|
||||
'''
|
||||
|
@ -307,6 +354,14 @@ class SaltCP(object):
|
|||
'use a grain value to identify targets, the syntax '
|
||||
'for the target is the grains key followed by a pcre '
|
||||
'regular expression:\n"os:Arch.*"'))
|
||||
parser.add_option('-N',
|
||||
'--nodegroup',
|
||||
default=False,
|
||||
dest='nodegroup',
|
||||
action='store_true',
|
||||
help=('Instead of using shell globs to evaluate the target '
|
||||
'use one of the predefined nodegroups to identify a '
|
||||
'list of targets.'))
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
default='/etc/salt/master',
|
||||
|
@ -323,6 +378,7 @@ class SaltCP(object):
|
|||
opts['pcre'] = options.pcre
|
||||
opts['list'] = options.list_
|
||||
opts['grain'] = options.grain
|
||||
opts['nodegroup'] = options.nodegroup
|
||||
opts['conf_file'] = options.conf_file
|
||||
|
||||
if opts['list']:
|
||||
|
@ -417,7 +473,9 @@ class SaltKey(object):
|
|||
default=2048,
|
||||
type=int,
|
||||
help=('Set the keysize for the generated key, only works with '
|
||||
'the "--gen-keys" option; default=2048'))
|
||||
'the "--gen-keys" option, the key size must be 2048 or '
|
||||
'higher, otherwise it will be rounded up to 2048'
|
||||
'; default=2048'))
|
||||
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
|
@ -438,7 +496,10 @@ class SaltKey(object):
|
|||
opts['delete'] = options.delete
|
||||
opts['gen_keys'] = options.gen_keys
|
||||
opts['gen_keys_dir'] = options.gen_keys_dir
|
||||
opts['keysize'] = options.keysize
|
||||
if options.keysize < 2048:
|
||||
opts['keysize'] = 2048
|
||||
else:
|
||||
opts['keysize'] = options.keysize
|
||||
|
||||
opts.update(salt.config.master_config(options.config))
|
||||
|
||||
|
@ -512,12 +573,11 @@ class SaltCall(object):
|
|||
action='store_true',
|
||||
dest='yaml_out',
|
||||
help='Print the output from the salt command in yaml.')
|
||||
if JSON:
|
||||
parser.add_option('--json-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='json_out',
|
||||
help='Print the output from the salt command in json.')
|
||||
parser.add_option('--json-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='json_out',
|
||||
help='Print the output from the salt command in json.')
|
||||
parser.add_option('--no-color',
|
||||
default=False,
|
||||
dest='no_color',
|
||||
|
@ -535,10 +595,7 @@ class SaltCall(object):
|
|||
opts['txt_out'] = options.txt_out
|
||||
opts['yaml_out'] = options.yaml_out
|
||||
opts['color'] = not options.no_color
|
||||
if JSON:
|
||||
opts['json_out'] = options.json_out
|
||||
else:
|
||||
opts['json_out'] = False
|
||||
opts['json_out'] = options.json_out
|
||||
opts.update(salt.config.minion_config(options.config))
|
||||
opts['log_level'] = options.log_level
|
||||
if len(args) >= 1:
|
||||
|
|
|
@ -4,7 +4,7 @@ minion modules.
|
|||
'''
|
||||
|
||||
# Import python modules
|
||||
import pprint
|
||||
import sys
|
||||
|
||||
# Import salt libs
|
||||
import salt
|
||||
|
@ -30,10 +30,15 @@ class Caller(object):
|
|||
'''
|
||||
ret = {}
|
||||
if self.opts['fun'] not in self.minion.functions:
|
||||
print 'Function {0} is not available'.format(self.opts['fun'])
|
||||
ret['return'] = self.minion.functions[self.opts['fun']](
|
||||
*self.opts['arg']
|
||||
)
|
||||
sys.stderr.write('Function {0} is not available\n'.format(self.opts['fun']))
|
||||
sys.exit(1)
|
||||
try:
|
||||
ret['return'] = self.minion.functions[self.opts['fun']](
|
||||
*self.opts['arg']
|
||||
)
|
||||
except TypeError, exc:
|
||||
sys.stderr.write('Error running \'{0}\': {1}\n'.format(self.opts['fun'], str(exc)))
|
||||
sys.exit(1)
|
||||
if hasattr(self.minion.functions[self.opts['fun']], '__outputter__'):
|
||||
oput = self.minion.functions[self.opts['fun']].__outputter__
|
||||
if isinstance(oput, str):
|
||||
|
@ -58,7 +63,27 @@ class Caller(object):
|
|||
Print out the grains
|
||||
'''
|
||||
grains = salt.loader.grains(self.opts)
|
||||
pprint.pprint(grains)
|
||||
printout = self._get_outputter(out='yaml')
|
||||
# If --json-out is specified, pretty print it
|
||||
if 'json_out' in self.opts and self.opts['json_out']:
|
||||
printout.indent = 2
|
||||
printout(grains)
|
||||
|
||||
def _get_outputter(self, out=None):
|
||||
get_outputter = salt.output.get_outputter
|
||||
if self.opts['raw_out']:
|
||||
printout = get_outputter('raw')
|
||||
elif self.opts['json_out']:
|
||||
printout = get_outputter('json')
|
||||
elif self.opts['txt_out']:
|
||||
printout = get_outputter('txt')
|
||||
elif self.opts['yaml_out']:
|
||||
printout = get_outputter('yaml')
|
||||
elif out:
|
||||
printout = get_outputter(out)
|
||||
else:
|
||||
printout = get_outputter(None)
|
||||
return printout
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
|
@ -71,18 +96,11 @@ class Caller(object):
|
|||
else:
|
||||
ret = self.call()
|
||||
# Determine the proper output method and run it
|
||||
get_outputter = salt.output.get_outputter
|
||||
if self.opts['raw_out']:
|
||||
printout = get_outputter('raw')
|
||||
elif self.opts['json_out']:
|
||||
printout = get_outputter('json')
|
||||
elif self.opts['txt_out']:
|
||||
printout = get_outputter('txt')
|
||||
elif self.opts['yaml_out']:
|
||||
printout = get_outputter('yaml')
|
||||
elif 'out' in ret:
|
||||
printout = get_outputter(ret['out'])
|
||||
if 'out' in ret:
|
||||
printout = self._get_outputter(ret['out'])
|
||||
else:
|
||||
printout = get_outputter(None)
|
||||
printout = self._get_outputter()
|
||||
if 'json_out' in self.opts and self.opts['json_out']:
|
||||
printout.indent = 2
|
||||
|
||||
printout({'local': ret['return']}, color=self.opts['color'])
|
||||
|
|
|
@ -76,6 +76,8 @@ class SaltCP(object):
|
|||
args.append('list')
|
||||
elif self.opts['grain']:
|
||||
args.append('grain')
|
||||
elif self.opts['nodegroup']:
|
||||
args.append('nodegroup')
|
||||
|
||||
ret = local.cmd(*args)
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@ The data structure needs to be:
|
|||
# small, and only start with the ability to execute salt commands locally.
|
||||
# This means that the primary client to build is, the LocalClient
|
||||
|
||||
import cPickle as pickle
|
||||
import datetime
|
||||
import glob
|
||||
import os
|
||||
|
@ -40,6 +39,7 @@ import zmq
|
|||
# Import salt modules
|
||||
import salt.config
|
||||
import salt.payload
|
||||
from salt.exceptions import SaltClientError, SaltInvocationError
|
||||
|
||||
|
||||
def prep_jid(cachedir):
|
||||
|
@ -59,19 +59,13 @@ def prep_jid(cachedir):
|
|||
return jid
|
||||
|
||||
|
||||
class SaltClientError(Exception):
|
||||
'''
|
||||
Custom exception class.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
class LocalClient(object):
|
||||
'''
|
||||
Connect to the salt master via the local server and via root
|
||||
'''
|
||||
def __init__(self, c_path='/etc/salt/master'):
|
||||
self.opts = salt.config.master_config(c_path)
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.key = self.__read_master_key()
|
||||
|
||||
def __read_master_key(self):
|
||||
|
@ -200,7 +194,7 @@ class LocalClient(object):
|
|||
continue
|
||||
while fn_ not in ret:
|
||||
try:
|
||||
ret[fn_] = pickle.load(open(retp, 'r'))
|
||||
ret[fn_] = self.serial.load(open(retp, 'r'))
|
||||
except:
|
||||
pass
|
||||
if ret and start == 999999999999:
|
||||
|
@ -239,10 +233,10 @@ class LocalClient(object):
|
|||
continue
|
||||
while fn_ not in ret:
|
||||
try:
|
||||
ret_data = pickle.load(open(retp, 'r'))
|
||||
ret_data = self.serial.load(open(retp, 'r'))
|
||||
ret[fn_] = {'ret': ret_data}
|
||||
if os.path.isfile(outp):
|
||||
ret[fn_]['out'] = pickle.load(open(outp, 'r'))
|
||||
ret[fn_]['out'] = self.serial.load(open(outp, 'r'))
|
||||
except:
|
||||
pass
|
||||
if ret and start == 999999999999:
|
||||
|
@ -269,7 +263,7 @@ class LocalClient(object):
|
|||
loadp = os.path.join(jid_dir, '.load.p')
|
||||
if os.path.isfile(loadp):
|
||||
try:
|
||||
load = pickle.load(open(loadp, 'r'))
|
||||
load = self.serial.load(open(loadp, 'r'))
|
||||
if load['fun'] == cmd:
|
||||
# We found a match! Add the return values
|
||||
ret[jid] = {}
|
||||
|
@ -278,7 +272,7 @@ class LocalClient(object):
|
|||
retp = os.path.join(host_dir, 'return.p')
|
||||
if not os.path.isfile(retp):
|
||||
continue
|
||||
ret[jid][host] = pickle.load(open(retp))
|
||||
ret[jid][host] = self.serial.load(open(retp))
|
||||
except:
|
||||
continue
|
||||
else:
|
||||
|
@ -297,6 +291,7 @@ class LocalClient(object):
|
|||
'list': self._check_list_minions,
|
||||
'grain': self._check_grain_minions,
|
||||
'exsel': self._check_grain_minions,
|
||||
'compound': self._check_grain_minions,
|
||||
}[expr_form](expr)
|
||||
|
||||
def pub(self, tgt, fun, arg=(), expr_form='glob',
|
||||
|
@ -322,6 +317,14 @@ class LocalClient(object):
|
|||
minions:
|
||||
A set, the targets that the tgt passed should match.
|
||||
'''
|
||||
if expr_form == 'nodegroup':
|
||||
if tgt not in self.opts['nodegroups']:
|
||||
conf_file = self.opts.get('conf_file', 'the master config file')
|
||||
err = 'Node group {0} unavailable in {1}'.format(tgt, conf_file)
|
||||
raise SaltInvocationError(err)
|
||||
tgt = self.opts['nodegroups'][tgt]
|
||||
expr_form = 'compound'
|
||||
|
||||
# Run a check_minions, if no minions match return False
|
||||
# format the payload - make a function that does this in the payload
|
||||
# module
|
||||
|
@ -330,6 +333,7 @@ class LocalClient(object):
|
|||
# send!
|
||||
# return what we get back
|
||||
minions = self.check_minions(tgt, expr_form)
|
||||
|
||||
if not minions:
|
||||
return {'jid': '',
|
||||
'minions': minions}
|
||||
|
@ -369,7 +373,7 @@ class LocalClient(object):
|
|||
payload = None
|
||||
for ind in range(100):
|
||||
try:
|
||||
payload = salt.payload.unpackage(
|
||||
payload = self.serial.loads(
|
||||
socket.recv(
|
||||
zmq.NOBLOCK
|
||||
)
|
||||
|
|
|
@ -4,6 +4,7 @@ All salt configuration loading and defaults should be in this module
|
|||
|
||||
# Import python modules
|
||||
import os
|
||||
import tempfile
|
||||
import socket
|
||||
import sys
|
||||
|
||||
|
@ -31,6 +32,10 @@ def load_config(opts, path, env_var):
|
|||
if conf_opts == None:
|
||||
# The config file is empty and the yaml.load returned None
|
||||
conf_opts = {}
|
||||
else:
|
||||
# allow using numeric ids: convert int to string
|
||||
if 'id' in conf_opts:
|
||||
conf_opts['id'] = str(conf_opts['id'])
|
||||
opts.update(conf_opts)
|
||||
opts['conf_file'] = path
|
||||
except Exception, e:
|
||||
|
@ -45,8 +50,9 @@ def prepend_root_dir(opts, path_options):
|
|||
'root_dir' option.
|
||||
'''
|
||||
for path_option in path_options:
|
||||
opts[path_option] = os.path.normpath(
|
||||
os.sep.join([opts['root_dir'], opts[path_option]]))
|
||||
if path_option in opts:
|
||||
opts[path_option] = os.path.normpath(
|
||||
os.sep.join([opts['root_dir'], opts[path_option]]))
|
||||
|
||||
|
||||
def minion_config(path):
|
||||
|
@ -76,6 +82,8 @@ def minion_config(path):
|
|||
'log_granular_levels': {},
|
||||
'test': False,
|
||||
'cython_enable': False,
|
||||
'state_verbose': False,
|
||||
'acceptance_wait_time': 10,
|
||||
}
|
||||
|
||||
load_config(opts, path, 'SALT_MINION_CONFIG')
|
||||
|
@ -98,6 +106,9 @@ def minion_config(path):
|
|||
# Prepend root_dir to other paths
|
||||
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file'])
|
||||
|
||||
# set up the extension_modules location from the cachedir
|
||||
opts['extension_modules'] = os.path.join(opts['cachedir'], 'extmods')
|
||||
|
||||
return opts
|
||||
|
||||
|
||||
|
@ -108,7 +119,7 @@ def master_config(path):
|
|||
opts = {'interface': '0.0.0.0',
|
||||
'publish_port': '4505',
|
||||
'worker_threads': 5,
|
||||
'sock_dir': '/tmp/.salt-unix',
|
||||
'sock_dir': os.path.join(tempfile.gettempdir(), '.salt-unix'),
|
||||
'ret_port': '4506',
|
||||
'keep_jobs': 24,
|
||||
'root_dir': '/',
|
||||
|
@ -116,7 +127,7 @@ def master_config(path):
|
|||
'cachedir': '/var/cache/salt',
|
||||
'file_roots': {
|
||||
'base': ['/srv/salt'],
|
||||
},
|
||||
},
|
||||
'file_buffer_size': 1048576,
|
||||
'hash_type': 'md5',
|
||||
'conf_file': path,
|
||||
|
@ -131,14 +142,16 @@ def master_config(path):
|
|||
'log_granular_levels': {},
|
||||
'cluster_masters': [],
|
||||
'cluster_mode': 'paranoid',
|
||||
}
|
||||
'serial': 'msgpack',
|
||||
'nodegroups': {},
|
||||
}
|
||||
|
||||
load_config(opts, path, 'SALT_MASTER_CONFIG')
|
||||
|
||||
opts['aes'] = salt.crypt.Crypticle.generate_key_string()
|
||||
|
||||
# Prepend root_dir to other paths
|
||||
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file'])
|
||||
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file', 'sock_dir'])
|
||||
|
||||
# Enabling open mode requires that the value be set to True, and nothing
|
||||
# else!
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
'''
|
||||
The crypt module manages all of the cyptogophy functions for minions and
|
||||
The crypt module manages all of the cryptography functions for minions and
|
||||
masters, encrypting and decrypting payloads, preparing messages, and
|
||||
authenticating peers
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
import cPickle as pickle
|
||||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
|
@ -23,6 +22,7 @@ import zmq
|
|||
# Import salt utils
|
||||
import salt.payload
|
||||
import salt.utils
|
||||
from salt.exceptions import AuthenticationError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -42,7 +42,9 @@ def gen_keys(keydir, keyname, keysize):
|
|||
priv = '{0}.pem'.format(base)
|
||||
pub = '{0}.pub'.format(base)
|
||||
gen = RSA.gen_key(keysize, 1)
|
||||
cumask = os.umask(191)
|
||||
gen.save_key(priv, callback=foo_pass)
|
||||
os.umask(cumask)
|
||||
gen.save_pub_key(pub)
|
||||
key = RSA.load_key(priv, callback=foo_pass)
|
||||
os.chmod(priv, 256)
|
||||
|
@ -69,9 +71,9 @@ class MasterKeys(dict):
|
|||
key = None
|
||||
try:
|
||||
key = RSA.load_key(self.rsa_path, callback=foo_pass)
|
||||
log.debug('Loaded master key: %s', self.rsa_path)
|
||||
log.debug('Loaded master key: {0}'.format(self.rsa_path))
|
||||
except:
|
||||
log.info('Generating master key: %s', self.rsa_path)
|
||||
log.info('Generating master key: {0}'.format(self.rsa_path))
|
||||
key = gen_keys(self.opts['pki_dir'], 'master', 4096)
|
||||
return key
|
||||
|
||||
|
@ -98,6 +100,7 @@ class Auth(object):
|
|||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
|
||||
if 'syndic_master' in self.opts:
|
||||
self.mpub = 'syndic_master.pub'
|
||||
|
@ -113,9 +116,9 @@ class Auth(object):
|
|||
key = None
|
||||
try:
|
||||
key = RSA.load_key(self.rsa_path, callback=foo_pass)
|
||||
log.debug('Loaded minion key: %s', self.rsa_path)
|
||||
log.debug('Loaded minion key: {0}'.format(self.rsa_path))
|
||||
except:
|
||||
log.info('Generating minion key: %s', self.rsa_path)
|
||||
log.info('Generating minion key: {0}'.format(self.rsa_path))
|
||||
key = gen_keys(self.opts['pki_dir'], 'minion', 4096)
|
||||
return key
|
||||
|
||||
|
@ -188,9 +191,9 @@ class Auth(object):
|
|||
context = zmq.Context()
|
||||
socket = context.socket(zmq.REQ)
|
||||
socket.connect(self.opts['master_uri'])
|
||||
payload = salt.payload.package(self.minion_sign_in_payload())
|
||||
payload = self.serial.dumps(self.minion_sign_in_payload())
|
||||
socket.send(payload)
|
||||
payload = salt.payload.unpackage(socket.recv())
|
||||
payload = self.serial.loads(socket.recv())
|
||||
if 'load' in payload:
|
||||
if 'ret' in payload['load']:
|
||||
if not payload['load']['ret']:
|
||||
|
@ -205,8 +208,9 @@ class Auth(object):
|
|||
else:
|
||||
log.error(
|
||||
'The Salt Master has cached the public key for this '
|
||||
'node, this salt minion will wait for 10 seconds '
|
||||
'before attempting to re-authenticate'
|
||||
'node, this salt minion will wait for %s seconds '
|
||||
'before attempting to re-authenticate',
|
||||
self.opts['acceptance_wait_time']
|
||||
)
|
||||
return 'retry'
|
||||
if not self.verify_master(payload['pub_key'], payload['token']):
|
||||
|
@ -224,14 +228,6 @@ class Auth(object):
|
|||
return auth
|
||||
|
||||
|
||||
class AuthenticationError(Exception):
|
||||
'''
|
||||
Custom exception class.
|
||||
'''
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class Crypticle(object):
|
||||
'''
|
||||
Authenticated encryption class
|
||||
|
@ -244,9 +240,10 @@ class Crypticle(object):
|
|||
AES_BLOCK_SIZE = 16
|
||||
SIG_SIZE = hashlib.sha256().digest_size
|
||||
|
||||
def __init__(self, key_string, key_size=192):
|
||||
def __init__(self, opts, key_string, key_size=192):
|
||||
self.keys = self.extract_keys(key_string, key_size)
|
||||
self.key_size = key_size
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
|
||||
@classmethod
|
||||
def generate_key_string(cls, key_size=192):
|
||||
|
@ -288,21 +285,21 @@ class Crypticle(object):
|
|||
data = cypher.decrypt(data)
|
||||
return data[:-ord(data[-1])]
|
||||
|
||||
def dumps(self, obj, pickler=pickle):
|
||||
def dumps(self, obj):
|
||||
'''
|
||||
pickle and encrypt a python object
|
||||
Serialize and encrypt a python object
|
||||
'''
|
||||
return self.encrypt(self.PICKLE_PAD + pickler.dumps(obj))
|
||||
return self.encrypt(self.PICKLE_PAD + self.serial.dumps(obj))
|
||||
|
||||
def loads(self, data, pickler=pickle):
|
||||
def loads(self, data):
|
||||
'''
|
||||
decrypt and un-pickle a python object
|
||||
Decrypt and un-serialize a python object
|
||||
'''
|
||||
data = self.decrypt(data)
|
||||
# simple integrity check to verify that we got meaningful data
|
||||
if not data.startswith(self.PICKLE_PAD):
|
||||
return {}
|
||||
return pickler.loads(data[len(self.PICKLE_PAD):])
|
||||
return self.serial.loads(data[len(self.PICKLE_PAD):])
|
||||
|
||||
|
||||
class SAuth(Auth):
|
||||
|
@ -326,7 +323,7 @@ class SAuth(Auth):
|
|||
print 'Failed to authenticate with the master, verify that this'\
|
||||
+ ' minion\'s public key has been accepted on the salt master'
|
||||
sys.exit(2)
|
||||
return Crypticle(creds['aes'])
|
||||
return Crypticle(self.opts, creds['aes'])
|
||||
|
||||
def gen_token(self, clear_tok):
|
||||
'''
|
||||
|
|
53
salt/exceptions.py
Normal file
|
@ -0,0 +1,53 @@
|
|||
'''
|
||||
This module is a central location for all salt exceptions
|
||||
'''
|
||||
|
||||
class SaltException(Exception):
|
||||
'''
|
||||
Base exception class; all Salt-specific exceptions should subclass this
|
||||
'''
|
||||
pass
|
||||
|
||||
class SaltClientError(SaltException):
|
||||
'''
|
||||
Problem reading the master root key
|
||||
'''
|
||||
pass
|
||||
|
||||
class AuthenticationError(SaltException):
|
||||
'''
|
||||
If sha256 signature fails during decryption
|
||||
'''
|
||||
pass
|
||||
|
||||
class CommandNotFoundError(SaltException):
|
||||
'''
|
||||
Used in modules or grains when a required binary is not available
|
||||
'''
|
||||
pass
|
||||
|
||||
class LoaderError(SaltException):
|
||||
'''
|
||||
Problems loading the right renderer
|
||||
'''
|
||||
pass
|
||||
|
||||
class MinionError(SaltException):
|
||||
'''
|
||||
Minion problems reading uris such as salt:// or http://
|
||||
'''
|
||||
pass
|
||||
|
||||
class SaltInvocationError(SaltException):
|
||||
'''
|
||||
Used when the wrong number of arguments are sent to modules
|
||||
or invalid arguments are specified on the command line
|
||||
'''
|
||||
pass
|
||||
|
||||
class PkgParseError(SaltException):
|
||||
'''
|
||||
Used when of the pkg modules cannot correctly parse the output from the CLI
|
||||
tool (pacman, yum, apt, aptitude, etc)
|
||||
'''
|
||||
pass
|
|
@ -15,8 +15,15 @@ as those returned here
|
|||
|
||||
import os
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import re
|
||||
import platform
|
||||
import salt.utils
|
||||
|
||||
# Solve the Chicken and egg problem where grains need to run before any
|
||||
# of the modules are loaded and are generally available for any usage.
|
||||
import salt.modules.cmd
|
||||
__salt__ = {'cmd.run': salt.modules.cmd._run_quiet}
|
||||
|
||||
|
||||
def _kernel():
|
||||
|
@ -26,20 +33,32 @@ def _kernel():
|
|||
# Provides:
|
||||
# kernel
|
||||
grains = {}
|
||||
grains['kernel'] = subprocess.Popen(['uname', '-s'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
grains['kernel'] = __salt__['cmd.run']('uname -s').strip()
|
||||
|
||||
if grains['kernel'] == 'aix':
|
||||
grains['kernelrelease'] = subprocess.Popen(['oslevel', '-s'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
grains['kernelrelease'] = __salt__['cmd.run']('oslevel -s').strip()
|
||||
else:
|
||||
grains['kernelrelease'] = subprocess.Popen(['uname', '-r'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
grains['kernelrelease'] = __salt__['cmd.run']('uname -r').strip()
|
||||
if 'kernel' not in grains:
|
||||
grains['kernel'] = 'Unknown'
|
||||
if not grains['kernel']:
|
||||
grains['kernel'] = 'Unknown'
|
||||
return grains
|
||||
|
||||
def _windows_cpudata():
|
||||
'''
|
||||
Return the cpu information for Windows systems architecture
|
||||
'''
|
||||
# Provides:
|
||||
# cpuarch
|
||||
# num_cpus
|
||||
# cpu_model
|
||||
grains = {}
|
||||
grains['cpuarch'] = platform.machine()
|
||||
if 'NUMBER_OF_PROCESSORS' in os.environ:
|
||||
grains['num_cpus'] = os.environ['NUMBER_OF_PROCESSORS']
|
||||
grains['cpu_model'] = platform.processor()
|
||||
return grains
|
||||
|
||||
def _linux_cpudata():
|
||||
'''
|
||||
|
@ -53,10 +72,17 @@ def _linux_cpudata():
|
|||
grains = {}
|
||||
cpuinfo = '/proc/cpuinfo'
|
||||
# Grab the Arch
|
||||
arch = subprocess.Popen(['uname', '-m'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
arch = __salt__['cmd.run']('uname -m').strip()
|
||||
grains['cpuarch'] = arch
|
||||
if not grains['cpuarch']:
|
||||
# Some systems such as Debian don't like uname -m
|
||||
# so fallback gracefully to the processor type
|
||||
if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
|
||||
arch = __salt__['cmd.run']('uname -p')
|
||||
grains['cpuarch'] = arch
|
||||
if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
|
||||
arch = __salt__['cmd.run']('uname -i')
|
||||
grains['cpuarch'] = arch
|
||||
if not grains['cpuarch'] or grains['cpuarch'] == 'unknown':
|
||||
grains['cpuarch'] = 'Unknown'
|
||||
# Parse over the cpuinfo file
|
||||
if os.path.isfile(cpuinfo):
|
||||
|
@ -84,22 +110,16 @@ def _freebsd_cpudata():
|
|||
Return cpu information for FreeBSD systems
|
||||
'''
|
||||
grains = {}
|
||||
grains['cpuarch'] = subprocess.Popen(
|
||||
'/sbin/sysctl hw.machine',
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE
|
||||
).communicate()[0].split(':')[1].strip()
|
||||
grains['num_cpus'] = subprocess.Popen(
|
||||
'/sbin/sysctl hw.ncpu',
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE
|
||||
).communicate()[0].split(':')[1].strip()
|
||||
grains['cpu_model'] = subprocess.Popen(
|
||||
'/sbin/sysctl hw.model',
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE
|
||||
).communicate()[0].split(':')[1].strip()
|
||||
grains['cpu_flags'] = []
|
||||
sysctl = salt.utils.which('sysctl')
|
||||
|
||||
if sysctl:
|
||||
machine_cmd = '{0} -n hw.machine'.format(sysctl)
|
||||
ncpu_cmd = '{0} -n hw.ncpu'.format(sysctl)
|
||||
model_cpu = '{0} -n hw.model'.format(sysctl)
|
||||
grains['num_cpus'] = __salt__['cmd.run'](ncpu_cmd).strip()
|
||||
grains['cpu_model'] = __salt__['cmd.run'](model_cpu).strip()
|
||||
grains['cpuarch'] = __salt__['cmd.run'](machine_cmd).strip()
|
||||
grains['cpu_flags'] = []
|
||||
return grains
|
||||
|
||||
|
||||
|
@ -112,6 +132,7 @@ def _memdata(osdata):
|
|||
grains = {'mem_total': 0}
|
||||
if osdata['kernel'] == 'Linux':
|
||||
meminfo = '/proc/meminfo'
|
||||
|
||||
if os.path.isfile(meminfo):
|
||||
for line in open(meminfo, 'r').readlines():
|
||||
comps = line.split(':')
|
||||
|
@ -119,13 +140,20 @@ def _memdata(osdata):
|
|||
continue
|
||||
if comps[0].strip() == 'MemTotal':
|
||||
grains['mem_total'] = int(comps[1].split()[0]) / 1024
|
||||
elif osdata['kernel'] == 'FreeBSD':
|
||||
mem = subprocess.Popen(
|
||||
'/sbin/sysctl hw.physmem',
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE
|
||||
).communicate()[0].split(':')[1].strip()
|
||||
grains['mem_total'] = str(int(mem) / 1024 / 1024)
|
||||
elif osdata['kernel'] in ('FreeBSD','OpenBSD'):
|
||||
sysctl = salt.utils.which('sysctl')
|
||||
if sysctl:
|
||||
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl)).strip()
|
||||
grains['mem_total'] = str(int(mem) / 1024 / 1024)
|
||||
elif osdata['kernel'] == 'Windows':
|
||||
for line in __salt__['cmd.run']('SYSTEMINFO /FO LIST').split('\n'):
|
||||
comps = line.split(':')
|
||||
if not len(comps) > 1:
|
||||
continue
|
||||
if comps[0].strip() == 'Total Physical Memory':
|
||||
grains['mem_total'] = int(comps[1].split()[0].replace(',', ''))
|
||||
break
|
||||
|
||||
return grains
|
||||
|
||||
|
||||
|
@ -138,24 +166,61 @@ def _virtual(osdata):
|
|||
# Provides:
|
||||
# virtual
|
||||
grains = {'virtual': 'physical'}
|
||||
if 'Linux OpenBSD SunOS HP-UX'.count(osdata['kernel']):
|
||||
if os.path.isdir('/proc/vz'):
|
||||
lspci = salt.utils.which('lspci')
|
||||
dmidecode = salt.utils.which('dmidecode')
|
||||
|
||||
if dmidecode:
|
||||
output = __salt__['cmd.run']('dmidecode')
|
||||
# Product Name: VirtualBox
|
||||
if 'Vendor: QEMU' in output:
|
||||
# FIXME: Make this detect between kvm or qemu
|
||||
grains['virtual'] = 'kvm'
|
||||
elif 'VirtualBox' in output:
|
||||
grains['virtual'] = 'VirtualBox'
|
||||
# Product Name: VMware Virtual Platform
|
||||
elif 'VMware' in output:
|
||||
grains['virtual'] = 'VMware'
|
||||
# Manufacturer: Microsoft Corporation
|
||||
# Product Name: Virtual Machine
|
||||
elif 'Manufacturer: Microsoft' in output and 'Virtual Machine' in output:
|
||||
grains['virtual'] = 'VirtualPC'
|
||||
# Fall back to lspci if dmidecode isn't available
|
||||
elif lspci:
|
||||
model = __salt__['cmd.run']('lspci').lower()
|
||||
if 'vmware' in model:
|
||||
grains['virtual'] = 'VMware'
|
||||
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH VirtualBox Guest Service
|
||||
elif 'virtualbox' in model:
|
||||
grains['virtual'] = 'VirtualBox'
|
||||
elif 'qemu' in model:
|
||||
grains['virtual'] = 'kvm'
|
||||
choices = ('Linux', 'OpenBSD', 'SunOS', 'HP-UX')
|
||||
isdir = os.path.isdir
|
||||
if osdata['kernel'] in choices:
|
||||
if isdir('/proc/vz'):
|
||||
if os.path.isfile('/proc/vz/version'):
|
||||
grains['virtual'] = 'openvzhn'
|
||||
else:
|
||||
grains['virtual'] = 'openvzve'
|
||||
if os.path.isdir('/.SUNWnative'):
|
||||
elif isdir('/proc/sys/xen') or isdir('/sys/bus/xen') or isdir('/proc/xen'):
|
||||
grains['virtual'] = 'xen'
|
||||
if os.path.isfile('/proc/xen/xsd_kva'):
|
||||
grains['virtual_subtype'] = 'Xen Dom0'
|
||||
else:
|
||||
if os.path.isfile('/proc/xen/capabilities'):
|
||||
grains['virtual_subtype'] = 'Xen full virt DomU'
|
||||
else:
|
||||
grains['virtual_subtype'] = 'Xen paravirt DomU'
|
||||
elif isdir('/.SUNWnative'):
|
||||
grains['virtual'] = 'zone'
|
||||
if os.path.isfile('/proc/cpuinfo'):
|
||||
if open('/proc/cpuinfo', 'r').read().count('QEMU Virtual CPU'):
|
||||
elif os.path.isfile('/proc/cpuinfo'):
|
||||
if 'QEMU Virtual CPU' in open('/proc/cpuinfo', 'r').read():
|
||||
grains['virtual'] = 'kvm'
|
||||
elif osdata['kernel'] == 'FreeBSD':
|
||||
model = subprocess.Popen(
|
||||
'/sbin/sysctl hw.model',
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE
|
||||
).communicate()[0].split(':')[1].strip()
|
||||
if model.count('QEMU Virtual CPU'):
|
||||
sysctl = salt.utils.which('sysctl')
|
||||
if sysctl:
|
||||
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl)).strip()
|
||||
if 'QEMU Virtual CPU' in model:
|
||||
grains['virtual'] = 'kvm'
|
||||
return grains
|
||||
|
||||
|
@ -165,30 +230,100 @@ def _ps(osdata):
|
|||
Return the ps grain
|
||||
'''
|
||||
grains = {}
|
||||
grains['ps'] = 'ps auxwww' if\
|
||||
'FreeBSD NetBSD OpenBSD Darwin'.count(osdata['os']) else 'ps -ef'
|
||||
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'Darwin')
|
||||
if osdata['os'] in bsd_choices:
|
||||
grains['ps'] = 'ps auxwww'
|
||||
else:
|
||||
grains['ps'] = 'ps -efH'
|
||||
return grains
|
||||
|
||||
|
||||
def _linux_platform_data(osdata):
|
||||
'''
|
||||
The platform module is very smart about figuring out linux distro
|
||||
information. Instead of re-inventing the wheel, lets use it!
|
||||
'''
|
||||
# Provides:
|
||||
# osrelease
|
||||
# oscodename
|
||||
grains = {}
|
||||
(osname, osrelease, oscodename) = platform.dist()
|
||||
if 'os' not in osdata and osname:
|
||||
grains['os'] = osname
|
||||
if osrelease:
|
||||
grains['osrelease'] = osrelease
|
||||
if oscodename:
|
||||
grains['oscodename'] = oscodename
|
||||
return grains
|
||||
|
||||
def _windows_platform_data(osdata):
|
||||
'''
|
||||
Use the platform module for as much as we can.
|
||||
'''
|
||||
# Provides:
|
||||
# osrelease
|
||||
# oscodename
|
||||
grains = {}
|
||||
(osname, hostname, osrelease, osversion, machine, processor) = platform.uname()
|
||||
if 'os' not in osdata and osname:
|
||||
grains['os'] = osname
|
||||
if osrelease:
|
||||
grains['osrelease'] = osrelease
|
||||
if osversion:
|
||||
grains['osversion'] = osversion
|
||||
return grains
|
||||
|
||||
def os_data():
|
||||
'''
|
||||
Return grains pertaining to the operating system
|
||||
'''
|
||||
grains = {}
|
||||
if 'os' in os.environ:
|
||||
if os.environ['os'].startswith('Windows'):
|
||||
grains['os'] = 'Windows'
|
||||
grains['kernel'] = 'Windows'
|
||||
grains.update(_memdata(grains))
|
||||
grains.update(_windows_platform_data(grains))
|
||||
grains.update(_windows_cpudata())
|
||||
return grains
|
||||
grains.update(_kernel())
|
||||
|
||||
if grains['kernel'] == 'Linux':
|
||||
# Add lsb grains on any distro with lsb-release
|
||||
if os.path.isfile('/etc/lsb-release'):
|
||||
for line in open('/etc/lsb-release').readlines():
|
||||
# Matches any possible format:
|
||||
# DISTRIB_ID="Ubuntu"
|
||||
# DISTRIB_ID='Mageia'
|
||||
# DISTRIB_ID=Fedora
|
||||
# DISTRIB_RELEASE='10.10'
|
||||
# DISTRIB_CODENAME='squeeze'
|
||||
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
|
||||
regex = re.compile('^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?([\w\s\.-_]+)(?:\'|")?')
|
||||
match = regex.match(line)
|
||||
if match:
|
||||
# Adds: lsb_distrib_{id,release,codename,description}
|
||||
grains['lsb_{0}'.format(match.groups()[0].lower())] = match.groups()[1].rstrip()
|
||||
if os.path.isfile('/etc/arch-release'):
|
||||
grains['os'] = 'Arch'
|
||||
elif os.path.isfile('/etc/debian_version'):
|
||||
grains['os'] = 'Debian'
|
||||
if 'lsb_distrib_id' in grains:
|
||||
if 'Ubuntu' in grains['lsb_distrib_id']:
|
||||
grains['os'] = 'Ubuntu'
|
||||
elif os.path.isfile('/etc/issue.net') and \
|
||||
'Ubuntu' in open('/etc/issue.net').readline():
|
||||
grains['os'] = 'Ubuntu'
|
||||
elif os.path.isfile('/etc/gentoo-release'):
|
||||
grains['os'] = 'Gentoo'
|
||||
elif os.path.isfile('/etc/fedora-version'):
|
||||
elif os.path.isfile('/etc/fedora-release'):
|
||||
grains['os'] = 'Fedora'
|
||||
elif os.path.isfile('/etc/mandriva-version'):
|
||||
grains['os'] = 'Mandriva'
|
||||
elif os.path.isfile('/etc/mandrake-version'):
|
||||
grains['os'] = 'Mandrake'
|
||||
elif os.path.isfile('/etc/mageia-version'):
|
||||
grains['os'] = 'Mageia'
|
||||
elif os.path.isfile('/etc/meego-version'):
|
||||
grains['os'] = 'MeeGo'
|
||||
elif os.path.isfile('/etc/vmware-version'):
|
||||
|
@ -206,22 +341,27 @@ def os_data():
|
|||
grains['os'] = 'OEL'
|
||||
elif os.path.isfile('/etc/redhat-release'):
|
||||
data = open('/etc/redhat-release', 'r').read()
|
||||
if data.count('centos'):
|
||||
if 'centos' in data.lower():
|
||||
grains['os'] = 'CentOS'
|
||||
elif data.count('scientific'):
|
||||
elif 'scientific' in data.lower():
|
||||
grains['os'] = 'Scientific'
|
||||
else:
|
||||
grains['os'] = 'RedHat'
|
||||
elif os.path.isfile('/etc/SuSE-release'):
|
||||
data = open('/etc/SuSE-release', 'r').read()
|
||||
if data.count('SUSE LINUX Enterprise Server'):
|
||||
if 'SUSE LINUX Enterprise Server' in data:
|
||||
grains['os'] = 'SLES'
|
||||
elif data.count('SUSE LINUX Enterprise Desktop'):
|
||||
elif 'SUSE LINUX Enterprise Desktop' in data:
|
||||
grains['os'] = 'SLED'
|
||||
elif data.count('openSUSE'):
|
||||
elif 'openSUSE' in data:
|
||||
grains['os'] = 'openSUSE'
|
||||
else:
|
||||
grains['os'] = 'SUSE'
|
||||
# Use the already intelligent platform module to get distro info
|
||||
grains.update(_linux_platform_data(grains))
|
||||
# If the Linux version can not be determined
|
||||
if not 'os' in grains:
|
||||
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
|
||||
elif grains['kernel'] == 'sunos':
|
||||
grains['os'] = 'Solaris'
|
||||
elif grains['kernel'] == 'VMkernel':
|
||||
|
@ -230,18 +370,21 @@ def os_data():
|
|||
grains['os'] = 'MacOS'
|
||||
else:
|
||||
grains['os'] = grains['kernel']
|
||||
|
||||
if grains['kernel'] == 'Linux':
|
||||
grains.update(_linux_cpudata())
|
||||
elif grains['kernel'] == 'FreeBSD':
|
||||
elif grains['kernel'] in ('FreeBSD', 'OpenBSD'):
|
||||
# _freebsd_cpudata works on OpenBSD as well.
|
||||
grains.update(_freebsd_cpudata())
|
||||
|
||||
grains.update(_memdata(grains))
|
||||
|
||||
# Load the virtual machine info
|
||||
|
||||
grains.update(_virtual(grains))
|
||||
grains.update(_ps(grains))
|
||||
|
||||
# Get the hardware and bios data
|
||||
grains.update(_hw_data(grains))
|
||||
|
||||
return grains
|
||||
|
||||
|
||||
|
@ -279,7 +422,7 @@ def pythonversion():
|
|||
'''
|
||||
# Provides:
|
||||
# pythonversion
|
||||
return {'pythonversion': list(sys.version_info)}
|
||||
return {'pythonversion': tuple(sys.version_info)}
|
||||
|
||||
def pythonpath():
|
||||
'''
|
||||
|
@ -297,3 +440,84 @@ def saltpath():
|
|||
# saltpath
|
||||
path = os.path.abspath(os.path.join(__file__, os.path.pardir))
|
||||
return {'saltpath': os.path.dirname(path)}
|
||||
|
||||
|
||||
# Relatively complex mini-algorithm to iterate over the various
|
||||
# sections of dmidecode output and return matches for specific
|
||||
# lines containing data we want, but only in the right section.
|
||||
def _dmidecode_data(regex_dict):
|
||||
'''
|
||||
Parse the output of dmidecode in a generic fashion that can
|
||||
be used for the multiple system types which have dmidecode.
|
||||
'''
|
||||
# NOTE: This function might gain support for smbios instead
|
||||
# of dmidecode when salt gets working Solaris support
|
||||
ret = {}
|
||||
|
||||
# No use running if dmidecode isn't in the path
|
||||
if not salt.utils.which('dmidecode'):
|
||||
return ret
|
||||
|
||||
out = __salt__['cmd.run']('dmidecode')
|
||||
|
||||
for section in regex_dict:
|
||||
section_found = False
|
||||
|
||||
# Look at every line for the right section
|
||||
for line in out.split('\n'):
|
||||
if not line: continue
|
||||
# We've found it, woohoo!
|
||||
if re.match(section, line):
|
||||
section_found = True
|
||||
continue
|
||||
if not section_found:
|
||||
continue
|
||||
|
||||
# Now that a section has been found, find the data
|
||||
for item in regex_dict[section]:
|
||||
# Examples:
|
||||
# Product Name: 64639SU
|
||||
# Version: 7LETC1WW (2.21 )
|
||||
regex = re.compile('\s+{0}\s+(.*)$'.format(item))
|
||||
grain = regex_dict[section][item]
|
||||
# Skip to the next iteration if this grain
|
||||
# has been found in the dmidecode output.
|
||||
if grain in ret: continue
|
||||
|
||||
match = regex.match(line)
|
||||
|
||||
# Finally, add the matched data to the grains returned
|
||||
if match:
|
||||
ret[grain] = match.group(1).strip()
|
||||
return ret
|
||||
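For reference, here is a self-contained distillation of the section-scoped parsing loop above, run against a hard-coded dmidecode-style sample (the sample text and the Release Date value are invented for illustration; real output comes from the dmidecode binary):

import re

sample = '\n'.join([
    'BIOS Information',
    '\tVersion: 7LETC1WW (2.21 )',
    '\tRelease Date: 08/10/2011',
    'System Information',
    '\tProduct Name: 64639SU',
])

regex_dict = {
    'BIOS [Ii]nformation': {
        '[Vv]ersion:': 'biosversion',
        '[Rr]elease [Dd]ate:': 'biosreleasedate',
    },
}

ret = {}
for section in regex_dict:
    section_found = False
    for line in sample.split('\n'):
        # Skip lines until the section header matches, then start collecting
        if not section_found:
            if re.match(section, line):
                section_found = True
            continue
        for item in regex_dict[section]:
            grain = regex_dict[section][item]
            if grain in ret:
                continue
            match = re.match(r'\s+{0}\s+(.*)$'.format(item), line)
            if match:
                ret[grain] = match.group(1).strip()

print(ret)
# {'biosversion': '7LETC1WW (2.21 )', 'biosreleasedate': '08/10/2011'}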
|
||||
|
||||
def _hw_data(osdata):
|
||||
'''
|
||||
Get system specific hardware data from dmidecode
|
||||
|
||||
Provides
|
||||
biosversion
|
||||
productname
|
||||
manufacturer
|
||||
serialnumber
|
||||
biosreleasedate
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
'''
|
||||
grains = {}
|
||||
# TODO: *BSD dmidecode output
|
||||
if osdata['kernel'] == 'Linux':
|
||||
linux_dmi_regex = {
|
||||
'BIOS [Ii]nformation': {
|
||||
'[Vv]ersion:': 'biosversion',
|
||||
'[Rr]elease [Dd]ate:': 'biosreleasedate',
|
||||
},
|
||||
'[Ss]ystem [Ii]nformation': {
|
||||
'Manufacturer:': 'manufacturer',
|
||||
'Product(?: Name)?:': 'productname',
|
||||
'Serial Number:': 'serialnumber',
|
||||
},
|
||||
}
|
||||
grains.update(_dmidecode_data(linux_dmi_regex))
|
||||
return grains
|
||||
|
|
|
@ -11,26 +11,22 @@ import imp
|
|||
import logging
|
||||
import os
|
||||
import salt
|
||||
from salt.exceptions import LoaderError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
salt_base_path = os.path.dirname(salt.__file__)
|
||||
|
||||
|
||||
class LoaderError(Exception):
|
||||
'''
|
||||
Custom exception class.
|
||||
'''
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def minion_mods(opts):
|
||||
'''
|
||||
Returns the minion modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
extra_dirs = [
|
||||
os.path.join(opts['extension_modules'],
|
||||
'modules')
|
||||
]
|
||||
if 'module_dirs' in opts:
|
||||
extra_dirs = opts['module_dirs']
|
||||
extra_dirs.extend(opts['module_dirs'])
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'modules'),
|
||||
] + extra_dirs
|
||||
|
@ -42,9 +38,12 @@ def returners(opts):
|
|||
'''
|
||||
Returns the returner modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
extra_dirs = [
|
||||
os.path.join(opts['extension_modules'],
|
||||
'returners')
|
||||
]
|
||||
if 'returner_dirs' in opts:
|
||||
extra_dirs = opts['returner_dirs']
|
||||
extra_dirs.extend(opts['returner_dirs'])
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'returners'),
|
||||
] + extra_dirs
|
||||
|
@ -56,9 +55,12 @@ def states(opts, functions):
|
|||
'''
|
||||
Returns the returner modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
extra_dirs = [
|
||||
os.path.join(opts['extension_modules'],
|
||||
'states')
|
||||
]
|
||||
if 'states_dirs' in opts:
|
||||
extra_dirs = opts['states_dirs']
|
||||
extra_dirs.extend(opts['states_dirs'])
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'states'),
|
||||
] + extra_dirs
|
||||
|
@ -72,9 +74,12 @@ def render(opts, functions):
|
|||
'''
|
||||
Returns the render modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
extra_dirs = [
|
||||
os.path.join(opts['extension_modules'],
|
||||
'renderers')
|
||||
]
|
||||
if 'render_dirs' in opts:
|
||||
extra_dirs = opts['render_dirs']
|
||||
extra_dirs.extend(opts['render_dirs'])
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'renderers'),
|
||||
] + extra_dirs
|
||||
|
@ -84,7 +89,7 @@ def render(opts, functions):
|
|||
rend = load.filter_func('render', pack)
|
||||
if opts['renderer'] not in rend:
|
||||
err = ('The renderer {0} is unavailable, this error is often because '
|
||||
'the needed software is unavailabe'.format(opts['renderer']))
|
||||
'the needed software is unavailable'.format(opts['renderer']))
|
||||
log.critical(err)
|
||||
raise LoaderError(err)
|
||||
return rend
|
||||
|
@ -212,7 +217,7 @@ class Loader(object):
|
|||
log.info('Cython is enabled in options but not present '
|
||||
'on the system path. Skipping Cython modules.')
|
||||
for mod_dir in self.module_dirs:
|
||||
if not mod_dir.startswith('/'):
|
||||
if not os.path.isabs(mod_dir):
|
||||
continue
|
||||
if not os.path.isdir(mod_dir):
|
||||
continue
|
||||
|
@ -247,12 +252,20 @@ class Loader(object):
|
|||
mod.__grains__ = self.grains
|
||||
|
||||
if pack:
|
||||
if type(pack) == type(list()):
|
||||
if isinstance(pack, list):
|
||||
for chunk in pack:
|
||||
setattr(mod, chunk['name'], chunk['value'])
|
||||
else:
|
||||
setattr(mod, pack['name'], pack['value'])
|
||||
|
||||
# Call a module's initialization method if it exists
|
||||
if hasattr(mod, '__init__'):
|
||||
if callable(mod.__init__):
|
||||
try:
|
||||
mod.__init__()
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
if hasattr(mod, '__virtual__'):
|
||||
if callable(mod.__virtual__):
|
||||
virtual = mod.__virtual__()
|
||||
|
@ -293,6 +306,7 @@ class Loader(object):
|
|||
funcs['sys.list_functions'] = lambda: self.list_funcs(funcs)
|
||||
funcs['sys.list_modules'] = lambda: self.list_modules(funcs)
|
||||
funcs['sys.doc'] = lambda module = '': self.get_docs(funcs, module)
|
||||
funcs['sys.reload_modules'] = lambda: True
|
||||
return funcs
|
||||
|
||||
def list_funcs(self, funcs):
|
||||
|
@ -338,7 +352,7 @@ class Loader(object):
|
|||
def gen_grains(self):
|
||||
'''
|
||||
Read the grains directory and execute all of the public callable
|
||||
members. then verify that the returns are python dict's and return a
|
||||
members. Then verify that the returns are python dicts and return a
|
||||
dict containing all of the returned values.
|
||||
'''
|
||||
grains = {}
|
||||
|
@ -347,14 +361,14 @@ class Loader(object):
|
|||
if not key[key.index('.') + 1:] == 'core':
|
||||
continue
|
||||
ret = fun()
|
||||
if not type(ret) == type(dict()):
|
||||
if not isinstance(ret, dict):
|
||||
continue
|
||||
grains.update(ret)
|
||||
for key, fun in funcs.items():
|
||||
if key[key.index('.') + 1:] == 'core':
|
||||
continue
|
||||
ret = fun()
|
||||
if not type(ret) == type(dict()):
|
||||
if not isinstance(ret, dict):
|
||||
continue
|
||||
grains.update(ret)
|
||||
return grains
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
salt.log
|
||||
~~~~~~~~
|
||||
|
||||
This is were Salt's logging get's setup.
|
||||
This is where Salt's logging gets set up.
|
||||
|
||||
|
||||
:copyright: 2011 :email:`Pedro Algarvio (pedro@algarvio.me)`
|
||||
|
|
114
salt/master.py
114
salt/master.py
|
@ -1,48 +1,48 @@
|
|||
'''
|
||||
This module contains all fo the routines needed to set up a master server, this
|
||||
This module contains all of the routines needed to set up a master server, this
|
||||
involves preparing the three listeners and the workers needed by the master.
|
||||
'''
|
||||
|
||||
# Import python modules
|
||||
import cPickle as pickle
|
||||
import datetime
|
||||
import hashlib
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
import time
|
||||
import shutil
|
||||
import logging
|
||||
import hashlib
|
||||
import tempfile
|
||||
import datetime
|
||||
import multiprocessing
|
||||
|
||||
# Import zeromq
|
||||
from M2Crypto import RSA
|
||||
import zmq
|
||||
from M2Crypto import RSA
|
||||
|
||||
# Import salt modules
|
||||
import salt.client
|
||||
import salt.crypt
|
||||
import salt.payload
|
||||
import salt.utils
|
||||
import salt.client
|
||||
import salt.payload
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def prep_jid(cachedir, load):
|
||||
def prep_jid(opts, load):
|
||||
'''
|
||||
Parses the job return directory, generates a job id and sets up the
|
||||
job id directory.
|
||||
'''
|
||||
jid_root = os.path.join(cachedir, 'jobs')
|
||||
serial = salt.payload.Serial(opts)
|
||||
jid_root = os.path.join(opts['cachedir'], 'jobs')
|
||||
jid = "{0:%Y%m%d%H%M%S%f}".format(datetime.datetime.now())
|
||||
|
||||
jid_dir = os.path.join(jid_root, jid)
|
||||
if not os.path.isdir(jid_dir):
|
||||
os.makedirs(jid_dir)
|
||||
pickle.dump(load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
serial.dump(load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
else:
|
||||
return prep_jid(load)
|
||||
return prep_jid(cachedir, load)
|
||||
return jid
|
||||
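As a quick illustration of the job id format used above, the snippet below reproduces the same strftime-style format string (the printed values are just examples):

import datetime

# Job ids are microsecond-resolution timestamps, e.g. '20111202093059123456'.
jid = "{0:%Y%m%d%H%M%S%f}".format(datetime.datetime.now())
print(jid)
# The first ten characters (YYYYMMDDHH) are the prefix that _clear_old_jobs
# compares against the keep_jobs setting when pruning old job directories.
print(jid[:10])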
|
||||
|
||||
|
@ -63,7 +63,7 @@ class SMaster(object):
|
|||
'''
|
||||
Return the crypticle used for AES
|
||||
'''
|
||||
return salt.crypt.Crypticle(self.opts['aes'])
|
||||
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
|
||||
|
||||
def __prep_key(self):
|
||||
'''
|
||||
|
@ -76,7 +76,9 @@ class SMaster(object):
|
|||
return open(keyfile, 'r').read()
|
||||
else:
|
||||
key = salt.crypt.Crypticle.generate_key_string()
|
||||
cumask = os.umask(191)
|
||||
open(keyfile, 'w+').write(key)
|
||||
os.umask(cumask)
|
||||
os.chmod(keyfile, 256)
|
||||
return key
|
||||
|
||||
|
@ -104,13 +106,16 @@ class Master(SMaster):
|
|||
for jid in os.listdir(jid_root):
|
||||
if int(cur) - int(jid[:10]) > self.opts['keep_jobs']:
|
||||
shutil.rmtree(os.path.join(jid_root, jid))
|
||||
time.sleep(60)
|
||||
try:
|
||||
time.sleep(60)
|
||||
except KeyboardInterrupt:
|
||||
break
|
||||
|
||||
def start(self):
|
||||
'''
|
||||
Turn on the master server components
|
||||
'''
|
||||
log.info('Starting the Salt Master')
|
||||
log.warn('Starting the Salt Master')
|
||||
multiprocessing.Process(target=self._clear_old_jobs).start()
|
||||
aes_funcs = AESFuncs(self.opts, self.crypticle)
|
||||
clear_funcs = ClearFuncs(
|
||||
|
@ -126,7 +131,13 @@ class Master(SMaster):
|
|||
aes_funcs,
|
||||
clear_funcs)
|
||||
reqserv.start_publisher()
|
||||
reqserv.run()
|
||||
|
||||
try:
|
||||
reqserv.run()
|
||||
except KeyboardInterrupt:
|
||||
# Shut the master down gracefully on SIGINT
|
||||
log.warn('Stopping the Salt Master')
|
||||
raise SystemExit('\nExiting on Ctrl-c')
|
||||
|
||||
|
||||
class Publisher(multiprocessing.Process):
|
||||
|
@ -135,7 +146,7 @@ class Publisher(multiprocessing.Process):
|
|||
commands.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
multiprocessing.Process.__init__(self)
|
||||
super(Publisher, self).__init__()
|
||||
self.opts = opts
|
||||
|
||||
def run(self):
|
||||
|
@ -153,10 +164,14 @@ class Publisher(multiprocessing.Process):
|
|||
pub_sock.bind(pub_uri)
|
||||
pull_sock.bind(pull_uri)
|
||||
|
||||
while True:
|
||||
package = pull_sock.recv()
|
||||
log.info('Publishing command')
|
||||
pub_sock.send(package)
|
||||
try:
|
||||
while True:
|
||||
package = pull_sock.recv()
|
||||
log.info('Publishing command')
|
||||
pub_sock.send(package)
|
||||
except KeyboardInterrupt:
|
||||
pub_sock.close()
|
||||
pull_sock.close()
|
||||
|
||||
|
||||
class ReqServer(object):
|
||||
|
@ -230,6 +245,7 @@ class MWorker(multiprocessing.Process):
|
|||
clear_funcs):
|
||||
multiprocessing.Process.__init__(self)
|
||||
self.opts = opts
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
self.crypticle = crypticle
|
||||
self.aes_funcs = aes_funcs
|
||||
self.clear_funcs = clear_funcs
|
||||
|
@ -244,19 +260,27 @@ class MWorker(multiprocessing.Process):
|
|||
os.path.join(self.opts['sock_dir'], 'workers.ipc')
|
||||
)
|
||||
log.info('Worker binding to socket {0}'.format(w_uri))
|
||||
socket.connect(w_uri)
|
||||
try:
|
||||
socket.connect(w_uri)
|
||||
|
||||
while True:
|
||||
package = socket.recv()
|
||||
payload = salt.payload.unpackage(package)
|
||||
ret = salt.payload.package(self._handle_payload(payload))
|
||||
socket.send(ret)
|
||||
while True:
|
||||
package = socket.recv()
|
||||
payload = self.serial.loads(package)
|
||||
ret = self.serial.dumps(self._handle_payload(payload))
|
||||
socket.send(ret)
|
||||
except KeyboardInterrupt:
|
||||
socket.close()
|
||||
|
||||
def _handle_payload(self, payload):
|
||||
'''
|
||||
The _handle_payload method is the key method used to figure out what
|
||||
needs to be done with communication to the server
|
||||
'''
|
||||
try:
|
||||
key = payload['enc']
|
||||
load = payload['load']
|
||||
except KeyError:
|
||||
return ''
|
||||
return {'aes': self._handle_aes,
|
||||
'pub': self._handle_pub,
|
||||
'clear': self._handle_clear}[payload['enc']](payload['load'])
|
||||
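The return statement above is a dict-based dispatch keyed on the envelope's ``enc`` field; a minimal standalone sketch of the same pattern follows (the handler functions are placeholders, not the real AESFuncs/ClearFuncs plumbing):

# Stand-in handlers; the real ones decrypt and route the load.
def handle_aes(load):
    return 'aes: {0}'.format(load)

def handle_pub(load):
    return 'pub: {0}'.format(load)

def handle_clear(load):
    return 'clear: {0}'.format(load)

def handle_payload(payload):
    handlers = {'aes': handle_aes, 'pub': handle_pub, 'clear': handle_clear}
    return handlers[payload['enc']](payload['load'])

print(handle_payload({'enc': 'clear', 'load': {'cmd': 'ping'}}))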
|
@ -283,7 +307,7 @@ class MWorker(multiprocessing.Process):
|
|||
except:
|
||||
return ''
|
||||
if 'cmd' not in data:
|
||||
log.error('Recieved malformed command {0}'.format(data))
|
||||
log.error('Received malformed command {0}'.format(data))
|
||||
return {}
|
||||
log.info('AES payload received with command {0}'.format(data['cmd']))
|
||||
return self.aes_funcs.run_func(data['cmd'], data)
|
||||
|
@ -303,6 +327,7 @@ class AESFuncs(object):
|
|||
#
|
||||
def __init__(self, opts, crypticle):
|
||||
self.opts = opts
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
self.crypticle = crypticle
|
||||
# Make a client
|
||||
self.local = salt.client.LocalClient(self.opts['conf_file'])
|
||||
|
@ -425,15 +450,15 @@ class AESFuncs(object):
|
|||
hn_dir = os.path.join(jid_dir, load['id'])
|
||||
if not os.path.isdir(hn_dir):
|
||||
os.makedirs(hn_dir)
|
||||
pickle.dump(load['return'],
|
||||
self.serial.dump(load['return'],
|
||||
open(os.path.join(hn_dir, 'return.p'), 'w+'))
|
||||
if 'out' in load:
|
||||
pickle.dump(load['out'],
|
||||
self.serial.dump(load['out'],
|
||||
open(os.path.join(hn_dir, 'out.p'), 'w+'))
|
||||
|
||||
def _syndic_return(self, load):
|
||||
'''
|
||||
Recieve a syndic minion return and format it to look like returns from
|
||||
Receive a syndic minion return and format it to look like returns from
|
||||
individual minions.
|
||||
'''
|
||||
# Verify the load
|
||||
|
@ -480,10 +505,13 @@ class AESFuncs(object):
|
|||
# If the command will make a recursive publish don't run
|
||||
if re.match('publish.*', clear_load['fun']):
|
||||
return {}
|
||||
# Check the permisions for this minion
|
||||
# Check the permissions for this minion
|
||||
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
|
||||
# The minion is not who it says it is!
|
||||
# We don't want to listen to it!
|
||||
jid = clear_load['jid']
|
||||
msg = 'Minion id {0} is not who it says it is!'.format(clear_load['id'])
|
||||
log.warn(msg)
|
||||
return {}
|
||||
perms = set()
|
||||
for match in self.opts['peer']:
|
||||
|
@ -498,7 +526,7 @@ class AESFuncs(object):
|
|||
if not good:
|
||||
return {}
|
||||
# Set up the publication payload
|
||||
jid = prep_jid(self.opts['cachedir'], clear_load)
|
||||
jid = prep_jid(self.opts, clear_load)
|
||||
payload = {'enc': 'aes'}
|
||||
load = {
|
||||
'fun': clear_load['fun'],
|
||||
|
@ -523,7 +551,7 @@ class AESFuncs(object):
|
|||
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
|
||||
)
|
||||
pub_sock.connect(pull_uri)
|
||||
pub_sock.send(salt.payload.package(payload))
|
||||
pub_sock.send(self.serial.dumps(payload))
|
||||
# Run the client get_returns method
|
||||
return self.local.get_returns(
|
||||
jid,
|
||||
|
@ -556,12 +584,13 @@ class ClearFuncs(object):
|
|||
Set up functions that are safe to execute when commands sent to the master
|
||||
without encryption and authentication
|
||||
'''
|
||||
# The ClearFuncs object encasulates the functions that can be executed in
|
||||
# The ClearFuncs object encapsulates the functions that can be executed in
|
||||
# the clear:
|
||||
# publish (The publish from the LocalClient)
|
||||
# _auth
|
||||
def __init__(self, opts, key, master_key, crypticle):
|
||||
self.opts = opts
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
self.key = key
|
||||
self.master_key = master_key
|
||||
self.crypticle = crypticle
|
||||
|
@ -608,7 +637,7 @@ class ClearFuncs(object):
|
|||
# 1. Verify that the key we are receiving matches the stored key
|
||||
# 2. Store the key if it is not there
|
||||
# 3. make an rsa key with the pub key
|
||||
# 4. encrypt the aes key as an encrypted pickle
|
||||
# 4. encrypt the aes key as an encrypted salt.payload
|
||||
# 5. package the return and return it
|
||||
log.info('Authentication request from %(id)s', load)
|
||||
pubfn = os.path.join(self.opts['pki_dir'],
|
||||
|
@ -655,7 +684,7 @@ class ClearFuncs(object):
|
|||
else:
|
||||
log.info(
|
||||
'Authentication failed from host %(id)s, the key is in '
|
||||
'pending and needs to be accepted with saltkey -a %(id)s',
|
||||
'pending and needs to be accepted with salt-key -a %(id)s',
|
||||
load
|
||||
)
|
||||
return {'enc': 'clear',
|
||||
|
@ -666,6 +695,7 @@ class ClearFuncs(object):
|
|||
pass
|
||||
else:
|
||||
# Something happened that I have not accounted for, FAIL!
|
||||
log.warn('Unaccounted for authentication failure')
|
||||
return {'enc': 'clear',
|
||||
'load': {'ret': False}}
|
||||
|
||||
|
@ -696,7 +726,7 @@ class ClearFuncs(object):
|
|||
if not os.path.isdir(jid_dir):
|
||||
os.makedirs(jid_dir)
|
||||
# Save the invocation information
|
||||
pickle.dump(clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
self.serial.dump(clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
# Set up the payload
|
||||
payload = {'enc': 'aes'}
|
||||
load = {
|
||||
|
@ -718,6 +748,6 @@ class ClearFuncs(object):
|
|||
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
|
||||
)
|
||||
pub_sock.connect(pull_uri)
|
||||
pub_sock.send(salt.payload.package(payload))
|
||||
pub_sock.send(self.serial.dumps(payload))
|
||||
return {'enc': 'clear',
|
||||
'load': {'jid': clear_load['jid']}}
|
||||
|
|
250
salt/minion.py
250
salt/minion.py
|
@ -3,9 +3,12 @@ Routines to set up a minion
|
|||
'''
|
||||
|
||||
# Import python libs
|
||||
import BaseHTTPServer
|
||||
import contextlib
|
||||
import glob
|
||||
import logging
|
||||
import multiprocessing
|
||||
import hashlib
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
|
@ -13,18 +16,21 @@ import tempfile
|
|||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import urllib2
|
||||
import urlparse
|
||||
|
||||
# Import zeromq libs
|
||||
import zmq
|
||||
|
||||
# Import salt libs
|
||||
from salt.crypt import AuthenticationError
|
||||
from salt.exceptions import AuthenticationError, MinionError
|
||||
import salt.client
|
||||
import salt.crypt
|
||||
import salt.loader
|
||||
import salt.modules
|
||||
import salt.returners
|
||||
import salt.utils
|
||||
import salt.payload
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -37,13 +43,6 @@ log = logging.getLogger(__name__)
|
|||
# 6. handle publications
|
||||
|
||||
|
||||
class MinionError(Exception):
|
||||
'''
|
||||
Custom exception class.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
class SMinion(object):
|
||||
'''
|
||||
Create an object that has loaded all of the minion module functions,
|
||||
|
@ -54,12 +53,18 @@ class SMinion(object):
|
|||
def __init__(self, opts):
|
||||
# Generate all of the minion side components
|
||||
self.opts = opts
|
||||
self.gen_modules()
|
||||
|
||||
def gen_modules(self):
|
||||
'''
|
||||
Load all of the modules for the minion
|
||||
'''
|
||||
self.functions = salt.loader.minion_mods(self.opts)
|
||||
self.returners = salt.loader.returners(self.opts)
|
||||
self.states = salt.loader.states(self.opts, self.functions)
|
||||
self.rend = salt.loader.render(self.opts, self.functions)
|
||||
self.matcher = Matcher(self.opts, self.functions)
|
||||
|
||||
self.functions['sys.reload_modules'] = self.gen_modules
|
||||
|
||||
class Minion(object):
|
||||
'''
|
||||
|
@ -71,9 +76,14 @@ class Minion(object):
|
|||
Pass in the options dict
|
||||
'''
|
||||
self.opts = opts
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.mod_opts = self.__prep_mod_opts()
|
||||
self.functions, self.returners = self.__load_modules()
|
||||
self.matcher = Matcher(self.opts, self.functions)
|
||||
if hasattr(self,'_syndic') and self._syndic:
|
||||
log.warn('Starting the Salt Syndic Minion')
|
||||
else:
|
||||
log.warn('Starting the Salt Minion')
|
||||
self.authenticate()
|
||||
|
||||
def __prep_mod_opts(self):
|
||||
|
@ -122,7 +132,7 @@ class Minion(object):
|
|||
# Verify that the publication applies to this minion
|
||||
if 'tgt_type' in data:
|
||||
if not getattr(self.matcher,
|
||||
data['tgt_type'] + '_match')(data['tgt']):
|
||||
'{0}_match'.format(data['tgt_type']))(data['tgt']):
|
||||
return
|
||||
else:
|
||||
if not self.matcher.glob_match(data['tgt']):
|
||||
|
@ -152,8 +162,12 @@ class Minion(object):
|
|||
Override this method if you wish to handle the decoded
|
||||
data differently.
|
||||
'''
|
||||
if isinstance(data['fun'], str):
|
||||
if data['fun'] == 'sys.reload_modules':
|
||||
self.functions, self.returners = self.__load_modules()
|
||||
|
||||
if self.opts['multiprocessing']:
|
||||
if type(data['fun']) == type(list()):
|
||||
if isinstance(data['fun'], list):
|
||||
multiprocessing.Process(
|
||||
target=lambda: self._thread_multi_return(data)
|
||||
).start()
|
||||
|
@ -162,7 +176,7 @@ class Minion(object):
|
|||
target=lambda: self._thread_return(data)
|
||||
).start()
|
||||
else:
|
||||
if type(data['fun']) == type(list()):
|
||||
if isinstance(data['fun'], list):
|
||||
threading.Thread(
|
||||
target=lambda: self._thread_multi_return(data)
|
||||
).start()
|
||||
|
@ -180,10 +194,8 @@ class Minion(object):
|
|||
for ind in range(0, len(data['arg'])):
|
||||
try:
|
||||
arg = eval(data['arg'][ind])
|
||||
if isinstance(arg, str) \
|
||||
or isinstance(arg, list) \
|
||||
or isinstance(arg, int) \
|
||||
or isinstance(arg, dict):
|
||||
types = (int, str, dict, list)
|
||||
if type(arg) in types:
|
||||
data['arg'][ind] = arg
|
||||
else:
|
||||
data['arg'][ind] = str(data['arg'][ind])
|
||||
|
@ -196,10 +208,10 @@ class Minion(object):
|
|||
ret['return'] = self.functions[data['fun']](*data['arg'])
|
||||
except Exception as exc:
|
||||
trb = traceback.format_exc()
|
||||
log.warning('The minion function caused an exception: %s', exc)
|
||||
log.warning('The minion function caused an exception: {0}'.format(exc))
|
||||
ret['return'] = trb
|
||||
else:
|
||||
ret['return'] = '"%s" is not available.' % function_name
|
||||
ret['return'] = '"{0}" is not available.'.format(function_name)
|
||||
|
||||
ret['jid'] = data['jid']
|
||||
ret['fun'] = data['fun']
|
||||
|
@ -222,11 +234,8 @@ class Minion(object):
|
|||
for index in range(0, len(data['arg'][ind])):
|
||||
try:
|
||||
arg = eval(data['arg'][ind][index])
|
||||
# FIXME: do away the ugly here...
|
||||
if isinstance(arg, str) \
|
||||
or isinstance(arg, list) \
|
||||
or isinstance(arg, int) \
|
||||
or isinstance(arg, dict):
|
||||
types = (str, int, list, dict)
|
||||
if type(arg) in types:
|
||||
data['arg'][ind][index] = arg
|
||||
else:
|
||||
data['arg'][ind][index] = str(data['arg'][ind][index])
|
||||
|
@ -280,17 +289,9 @@ class Minion(object):
|
|||
except KeyError:
|
||||
pass
|
||||
payload['load'] = self.crypticle.dumps(load)
|
||||
socket.send_pyobj(payload)
|
||||
socket.send(self.serial.dumps(payload))
|
||||
return socket.recv()
|
||||
|
||||
def reload_functions(self):
|
||||
'''
|
||||
Reload the functions dict for this minion, reading in any new functions
|
||||
'''
|
||||
self.functions = self.__load_functions()
|
||||
log.debug('Refreshed functions, loaded functions: %s', self.functions)
|
||||
return True
|
||||
|
||||
def authenticate(self):
|
||||
'''
|
||||
Authenticate with the master, this method breaks the functional
|
||||
|
@ -306,17 +307,38 @@ class Minion(object):
|
|||
log.info('Authentication with master successful!')
|
||||
break
|
||||
log.info('Waiting for minion key to be accepted by the master.')
|
||||
time.sleep(10)
|
||||
time.sleep(self.opts['acceptance_wait_time'])
|
||||
self.aes = creds['aes']
|
||||
self.publish_port = creds['publish_port']
|
||||
self.crypticle = salt.crypt.Crypticle(self.aes)
|
||||
self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
|
||||
|
||||
def passive_refresh(self):
|
||||
'''
|
||||
Check to see if the salt refresh file has been laid down; if it has,
|
||||
refresh the functions and returners.
|
||||
'''
|
||||
if os.path.isfile(
|
||||
os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'.module_refresh'
|
||||
)
|
||||
):
|
||||
self.functions, self.returners = self.__load_modules()
|
||||
os.remove(
|
||||
os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'.module_refresh'
|
||||
)
|
||||
)
|
||||
|
||||
def tune_in(self):
|
||||
'''
|
||||
Lock onto the publisher. This is the main event loop for the minion
|
||||
'''
|
||||
master_pub = ('tcp://' + self.opts['master_ip'] +
|
||||
':' + str(self.publish_port))
|
||||
master_pub = 'tcp://{0}:{1}'.format(
|
||||
self.opts['master_ip'],
|
||||
str(self.publish_port)
|
||||
)
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.SUB)
|
||||
socket.setsockopt(zmq.SUBSCRIBE, '')
|
||||
|
@ -326,7 +348,7 @@ class Minion(object):
|
|||
while True:
|
||||
payload = None
|
||||
try:
|
||||
payload = socket.recv_pyobj(1)
|
||||
payload = self.serial.loads(socket.recv(1))
|
||||
self._handle_payload(payload)
|
||||
last = time.time()
|
||||
except:
|
||||
|
@ -341,15 +363,18 @@ class Minion(object):
|
|||
last = time.time()
|
||||
time.sleep(0.05)
|
||||
multiprocessing.active_children()
|
||||
while True:
|
||||
payload = None
|
||||
try:
|
||||
payload = socket.recv_pyobj(1)
|
||||
self._handle_payload(payload)
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.05)
|
||||
multiprocessing.active_children()
|
||||
self.passive_refresh()
|
||||
else:
|
||||
while True:
|
||||
payload = None
|
||||
try:
|
||||
payload = self.serial.loads(socket.recv(1))
|
||||
self._handle_payload(payload)
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.05)
|
||||
multiprocessing.active_children()
|
||||
self.passive_refresh()
|
||||
|
||||
|
||||
class Syndic(salt.client.LocalClient, Minion):
|
||||
|
@ -358,6 +383,7 @@ class Syndic(salt.client.LocalClient, Minion):
|
|||
authenticate with a higher level master.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self._syndic = True
|
||||
salt.client.LocalClient.__init__(self, opts['_master_conf_file'])
|
||||
Minion.__init__(self, opts)
|
||||
|
||||
|
@ -443,11 +469,11 @@ class Matcher(object):
|
|||
'''
|
||||
matcher = 'glob'
|
||||
for item in data:
|
||||
if type(item) == type(dict()):
|
||||
if isinstance(item, dict):
|
||||
if 'match' in item:
|
||||
matcher = item['match']
|
||||
if hasattr(self, matcher + '_match'):
|
||||
return getattr(self, matcher + '_match')(match)
|
||||
return getattr(self, '{0}_match'.format(matcher))(match)
|
||||
else:
|
||||
log.error('Attempting to match with unknown matcher: %s', matcher)
|
||||
return False
|
||||
|
@ -475,7 +501,7 @@ class Matcher(object):
|
|||
'''
|
||||
Determines if this host is on the list
|
||||
'''
|
||||
return bool(tgt.count(self.opts['id']))
|
||||
return bool(tgt in self.opts['id'])
|
||||
|
||||
def grain_match(self, tgt):
|
||||
'''
|
||||
|
@ -486,7 +512,7 @@ class Matcher(object):
|
|||
log.error('Got insufficient arguments for grains from master')
|
||||
return False
|
||||
if comps[0] not in self.opts['grains']:
|
||||
log.error('Got unknown grain from master: %s', comps[0])
|
||||
log.error('Got unknown grain from master: {0}'.format(comps[0]))
|
||||
return False
|
||||
return bool(re.match(comps[1], self.opts['grains'][comps[0]]))
|
||||
|
||||
|
@ -498,6 +524,54 @@ class Matcher(object):
|
|||
return False
|
||||
return(self.functions[tgt]())
|
||||
|
||||
def compound_match(self, tgt):
|
||||
'''
|
||||
Runs the compound target check
|
||||
'''
|
||||
if not isinstance(tgt, str):
|
||||
log.debug('Compound target received that is not a string')
|
||||
return False
|
||||
ref = {'G': 'grain',
|
||||
'X': 'exsel',
|
||||
'L': 'list',
|
||||
'E': 'pcre'}
|
||||
results = []
|
||||
for match in tgt.split():
|
||||
# Attach the boolean operator
|
||||
if match == 'and':
|
||||
results.append('and')
|
||||
continue
|
||||
elif match == 'or':
|
||||
results.append('or')
|
||||
continue
|
||||
# If we are here then it is not a boolean operator, check if the
|
||||
# last member of the result list is a boolean operator; if not, append 'and'
|
||||
if results:
|
||||
if results[-1] != 'and' and results[-1] != 'or':
|
||||
results.append('and')
|
||||
if match[1] == '@':
|
||||
comps = match.split('@')
|
||||
matcher = ref.get(comps[0])
|
||||
if not matcher:
|
||||
# If an unknown matcher is called at any time, fail out
|
||||
return False
|
||||
print comps
|
||||
results.append(
|
||||
str(getattr(
|
||||
self,
|
||||
'{0}_match'.format(matcher)
|
||||
)('@'.join(comps[1:]))
|
||||
))
|
||||
else:
|
||||
results.append(
|
||||
str(getattr(
|
||||
self,
|
||||
'glob_match'
|
||||
)(match)
|
||||
))
|
||||
|
||||
print ' '.join(results)
|
||||
return eval(' '.join(results))
|
||||
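To make the evaluation step concrete, here is a rough, simplified sketch of how a compound target string is reduced to a boolean expression. The stand-in matcher below replaces the real grain/pcre/list/glob dispatch, and the target string and results are invented:

def fake_match(word):
    # Hard-coded results standing in for the real matchers.
    return {'G@os:Debian': True, 'web*': True, 'E@db.*': False}.get(word, False)

def compound(tgt):
    results = []
    for word in tgt.split():
        if word in ('and', 'or'):
            results.append(word)
        else:
            results.append(str(fake_match(word)))
    # e.g. 'True and True or False'
    return eval(' '.join(results))

print(compound('G@os:Debian and web* or E@db.*'))  # True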
|
||||
class FileClient(object):
|
||||
'''
|
||||
|
@ -505,6 +579,7 @@ class FileClient(object):
|
|||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.auth = salt.crypt.SAuth(opts)
|
||||
self.socket = self.__get_socket()
|
||||
|
||||
|
@ -522,7 +597,7 @@ class FileClient(object):
|
|||
Make sure that this path is intended for the salt master and trim it
|
||||
'''
|
||||
if not path.startswith('salt://'):
|
||||
raise MinionError('Unsupported path')
|
||||
raise MinionError('Unsupported path: {0}'.format(path))
|
||||
return path[7:]
|
||||
|
||||
def get_file(self, path, dest='', makedirs=False, env='base'):
|
||||
|
@ -549,9 +624,22 @@ class FileClient(object):
|
|||
else:
|
||||
load['loc'] = fn_.tell()
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
data = self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
self.socket.send(self.serial.dumps(payload))
|
||||
data = self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
|
||||
if not data['data']:
|
||||
if not fn_ and data['dest']:
|
||||
# This is a 0 byte file on the master
|
||||
dest = os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'files',
|
||||
env,
|
||||
data['dest']
|
||||
)
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
os.makedirs(destdir)
|
||||
if not os.path.exists(dest):
|
||||
open(dest, 'w+').write(data['data'])
|
||||
break
|
||||
if not fn_:
|
||||
dest = os.path.join(
|
||||
|
@ -567,6 +655,32 @@ class FileClient(object):
|
|||
fn_.write(data['data'])
|
||||
return dest
|
||||
|
||||
def get_url(self, url, dest, makedirs=False, env='base'):
|
||||
'''
|
||||
Get a single file from a URL.
|
||||
'''
|
||||
if urlparse.urlparse(url).scheme == 'salt':
|
||||
return self.get_file(url, dest, makedirs, env)
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
if makedirs:
|
||||
os.makedirs(destdir)
|
||||
else:
|
||||
return False
|
||||
try:
|
||||
with contextlib.closing(urllib2.urlopen(url)) as srcfp:
|
||||
with open(dest, 'wb') as destfp:
|
||||
shutil.copyfileobj(srcfp, destfp)
|
||||
return dest
|
||||
except urllib2.HTTPError, ex:
|
||||
raise MinionError('HTTP error {0} reading {1}: {3}'.format(
|
||||
ex.code,
|
||||
url,
|
||||
*BaseHTTPServer.BaseHTTPRequestHandler.responses[ex.code]))
|
||||
except urllib2.URLError, ex:
|
||||
raise MinionError('Error reading {0}: {1}'.format(url, ex.reason))
|
||||
return False
|
||||
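The non-salt branch above is plain urllib2 streamed to disk; a self-contained sketch of that pattern (Python 2, matching the imports in this module; the URL and destination are placeholders):

import contextlib
import shutil
import urllib2

def fetch(url, dest):
    # Stream the response body straight into the destination file.
    with contextlib.closing(urllib2.urlopen(url)) as srcfp:
        with open(dest, 'wb') as destfp:
            shutil.copyfileobj(srcfp, destfp)
    return dest

# fetch('http://www.slashdot.org/', '/tmp/index.html')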
|
||||
def cache_file(self, path, env='base'):
|
||||
'''
|
||||
Pull a file down from the file server and store it in the minion file
|
||||
|
@ -612,8 +726,8 @@ class FileClient(object):
|
|||
load = {'env': env,
|
||||
'cmd': '_file_list'}
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
return self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
self.socket.send(self.serial.dumps(payload))
|
||||
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
|
||||
|
||||
def hash_file(self, path, env='base'):
|
||||
'''
|
||||
|
@ -621,14 +735,26 @@ class FileClient(object):
|
|||
salt master file server prepend the path with salt://<file on server>
|
||||
otherwise, prepend the file with / for a local file.
|
||||
'''
|
||||
path = self._check_proto(path)
|
||||
try:
|
||||
path = self._check_proto(path)
|
||||
except MinionError:
|
||||
if not os.path.isfile(path):
|
||||
err = ('Specified file {0} is not present to generate '
|
||||
'hash').format(path)
|
||||
log.warning(err)
|
||||
return {}
|
||||
else:
|
||||
ret = {}
|
||||
ret['hsum'] = hashlib.md5(open(path, 'rb').read()).hexdigest()
|
||||
ret['hash_type'] = 'md5'
|
||||
return ret
|
||||
payload = {'enc': 'aes'}
|
||||
load = {'path': path,
|
||||
'env': env,
|
||||
'cmd': '_file_hash'}
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
return self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
self.socket.send(self.serial.dumps(payload))
|
||||
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
|
||||
|
||||
def list_env(self, path, env='base'):
|
||||
'''
|
||||
|
@ -638,8 +764,8 @@ class FileClient(object):
|
|||
load = {'env': env,
|
||||
'cmd': '_file_list'}
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
return self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
self.socket.send(self.serial.dumps(payload))
|
||||
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
|
||||
|
||||
def get_state(self, sls, env):
|
||||
'''
|
||||
|
@ -662,5 +788,5 @@ class FileClient(object):
|
|||
payload = {'enc': 'aes'}
|
||||
load = {'cmd': '_master_opts'}
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
return self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
self.socket.send(self.serial.dumps(payload))
|
||||
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
|
||||
|
|
|
@ -2,18 +2,22 @@
|
|||
Support for Apache
|
||||
'''
|
||||
|
||||
from re import sub
|
||||
import re
|
||||
|
||||
__outputter__ = {
|
||||
'signal': 'txt',
|
||||
}
|
||||
|
||||
|
||||
def __detect_os():
|
||||
'''
|
||||
Apache commands and paths differ depending on packaging
|
||||
'''
|
||||
httpd = 'CentOS Scientific RedHat Fedora'
|
||||
apache2 = 'Ubuntu'
|
||||
if httpd.count(__grains__['os']):
|
||||
httpd = ('CentOS', 'Scientific', 'RedHat', 'Fedora')
|
||||
apache2 = ('Ubuntu',)
|
||||
if __grains__['os'] in httpd:
|
||||
return 'apachectl'
|
||||
elif apache2.count(__grains__['os']):
|
||||
elif __grains__['os'] in apache2:
|
||||
return 'apache2ctl'
|
||||
else:
|
||||
return 'apachectl'
|
||||
|
@ -46,10 +50,10 @@ def fullversion():
|
|||
ret['compiled_with'] = []
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
if ': ' in line:
|
||||
comps = line.split(': ')
|
||||
if not comps:
|
||||
continue
|
||||
ret[comps[0].strip().lower().replace(' ', '_')] = comps[1].strip()
|
||||
elif ' -D' in line:
|
||||
cw = line.strip(' -D ')
|
||||
|
@ -71,9 +75,9 @@ def modules():
|
|||
ret['shared'] = []
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
if not comps:
|
||||
continue
|
||||
if '(static)' in line:
|
||||
ret['static'].append(comps[0])
|
||||
if '(shared)' in line:
|
||||
|
@ -93,7 +97,7 @@ def servermods():
|
|||
ret = []
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if '.c' in line:
|
||||
ret.append(line.strip())
|
||||
|
@ -114,7 +118,7 @@ def directives():
|
|||
out = __salt__['cmd.run'](cmd)
|
||||
out = out.replace('\n\t', '\t')
|
||||
for line in out.split('\n'):
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split('\t')
|
||||
desc = '\n'.join(comps[1:])
|
||||
|
@ -138,7 +142,7 @@ def vhosts():
|
|||
namevhost = ''
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.split('\n'):
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
if 'is a NameVirtualHost' in line:
|
||||
|
@ -148,11 +152,11 @@ def vhosts():
|
|||
if comps[0] == 'default':
|
||||
ret[namevhost]['default'] = {}
|
||||
ret[namevhost]['default']['vhost'] = comps[2]
|
||||
ret[namevhost]['default']['conf'] = sub(r'\(|\)', '', comps[3])
|
||||
ret[namevhost]['default']['conf'] = re.sub(r'\(|\)', '', comps[3])
|
||||
if comps[0] == 'port':
|
||||
ret[namevhost][comps[3]] = {}
|
||||
ret[namevhost][comps[3]]['vhost'] = comps[3]
|
||||
ret[namevhost][comps[3]]['conf'] = sub(r'\(|\)', '', comps[4])
|
||||
ret[namevhost][comps[3]]['conf'] = re.sub(r'\(|\)', '', comps[4])
|
||||
ret[namevhost][comps[3]]['port'] = comps[1]
|
||||
return ret
|
||||
|
||||
|
@ -165,8 +169,28 @@ def signal(signal=None):
|
|||
|
||||
salt '*' apache.signal restart
|
||||
'''
|
||||
valid_signals = 'start stop restart graceful graceful-stop'
|
||||
if not valid_signals.count(signal):
|
||||
no_extra_args = ('configtest', 'status', 'fullstatus')
|
||||
valid_signals = ('start', 'stop', 'restart', 'graceful', 'graceful-stop')
|
||||
|
||||
if signal not in valid_signals and signal not in no_extra_args:
|
||||
return
|
||||
cmd = __detect_os() + ' -k %s' % signal
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
# Make sure you use the right arguments
|
||||
if signal in valid_signals:
|
||||
arguments = ' -k {0}'.format(signal)
|
||||
else:
|
||||
arguments = ' {0}'.format(signal)
|
||||
cmd = __detect_os() + arguments
|
||||
out = __salt__['cmd.run_all'](cmd)
|
||||
|
||||
# A non-zero return code means fail
|
||||
if out['retcode'] and out['stderr']:
|
||||
ret = out['stderr'].strip()
|
||||
# 'apachectl configtest' returns 'Syntax OK' to stderr
|
||||
elif out['stderr']:
|
||||
ret = out['stderr'].strip()
|
||||
elif out['stdout']:
|
||||
ret = out['stdout'].strip()
|
||||
# No output for something like: apachectl graceful
|
||||
else:
|
||||
ret = 'Command: "{0}" completed successfully!'.format(cmd)
|
||||
return ret
|
||||
|
|
|
@ -8,7 +8,7 @@ def __virtual__():
|
|||
Confirm this module is on a Debian based system
|
||||
'''
|
||||
|
||||
return 'pkg' if __grains__['os'] == 'Debian' else False
|
||||
return 'pkg' if __grains__['os'] in [ 'Debian', 'Ubuntu' ] else False
|
||||
|
||||
|
||||
def available_version(name):
|
||||
|
@ -69,7 +69,7 @@ def refresh_db():
|
|||
if not len(cols):
|
||||
continue
|
||||
ident = " ".join(cols[1:4])
|
||||
if cols[0].count('Get'):
|
||||
if 'Get' in cols[0]:
|
||||
servers[ident] = True
|
||||
else:
|
||||
servers[ident] = False
|
||||
|
@ -96,7 +96,7 @@ def install(pkg, refresh=False):
|
|||
ret_pkgs = {}
|
||||
old_pkgs = list_pkgs()
|
||||
cmd = 'apt-get -y install {0}'.format(pkg)
|
||||
__salt__['cmd.retcode'](cmd)
|
||||
__salt__['cmd.run'](cmd)
|
||||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
|
@ -115,7 +115,7 @@ def install(pkg, refresh=False):
|
|||
|
||||
def remove(pkg):
|
||||
'''
|
||||
Remove a single package via ``aptitude remove``
|
||||
Remove a single package via ``apt-get remove``
|
||||
|
||||
Returns a list containing the names of the removed packages.
|
||||
|
||||
|
@ -127,7 +127,7 @@ def remove(pkg):
|
|||
old_pkgs = list_pkgs()
|
||||
|
||||
cmd = 'apt-get -y remove {0}'.format(pkg)
|
||||
__salt__['cmd.retcode'](cmd)
|
||||
__salt__['cmd.run'](cmd)
|
||||
new_pkgs = list_pkgs()
|
||||
for pkg in old_pkgs:
|
||||
if pkg not in new_pkgs:
|
||||
|
@ -138,8 +138,8 @@ def remove(pkg):
|
|||
|
||||
def purge(pkg):
|
||||
'''
|
||||
Remove a package via aptitude along with all configuration files and
|
||||
unused dependencies.
|
||||
Remove a package via ``apt-get purge`` along with all configuration
|
||||
files and unused dependencies.
|
||||
|
||||
Returns a list containing the names of the removed packages
|
||||
|
||||
|
@ -152,10 +152,10 @@ def purge(pkg):
|
|||
|
||||
# Remove initial package
|
||||
purge_cmd = 'apt-get -y purge {0}'.format(pkg)
|
||||
__salt__['cmd.retcode'](purge_cmd)
|
||||
|
||||
__salt__['cmd.run'](purge_cmd)
|
||||
|
||||
new_pkgs = list_pkgs()
|
||||
|
||||
|
||||
for pkg in old_pkgs:
|
||||
if pkg not in new_pkgs:
|
||||
ret_pkgs.append(pkg)
|
||||
|
@ -165,7 +165,7 @@ def purge(pkg):
|
|||
|
||||
def upgrade(refresh=True):
|
||||
'''
|
||||
Upgrades all packages via aptitude full-upgrade
|
||||
Upgrades all packages via ``apt-get dist-upgrade``
|
||||
|
||||
Returns a list of dicts containing the package names, and the new and old
|
||||
versions::
|
||||
|
@ -188,7 +188,7 @@ def upgrade(refresh=True):
|
|||
ret_pkgs = {}
|
||||
old_pkgs = list_pkgs()
|
||||
cmd = 'apt-get -y dist-upgrade'
|
||||
__salt__['cmd.retcode'](cmd)
|
||||
__salt__['cmd.run'](cmd)
|
||||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
|
@ -222,7 +222,7 @@ def list_pkgs(regex_string=""):
|
|||
|
||||
for line in out.split('\n'):
|
||||
cols = line.split()
|
||||
if len(cols) and cols[0].count('ii'):
|
||||
if len(cols) and 'ii' in cols[0]:
|
||||
ret[cols[1]] = cols[2]
|
||||
|
||||
return ret
|
||||
|
|
|
@ -32,7 +32,7 @@ def gzip(sourcefile):
|
|||
|
||||
def gunzip(gzipfile):
|
||||
'''
|
||||
Uses the gzip command to create gzip files
|
||||
Uses the gunzip command to unpack gzip files
|
||||
|
||||
CLI Example to unpack ``/tmp/sourcefile.txt.gz``::
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ import logging
|
|||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
import salt.utils
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -19,15 +20,39 @@ DEFAULT_CWD = os.path.expanduser('~')
|
|||
|
||||
# Set up the default outputters
|
||||
__outputter__ = {
|
||||
'run': 'txt'
|
||||
}
|
||||
'run': 'txt',
|
||||
}
|
||||
def _run(cmd,
|
||||
cwd=DEFAULT_CWD,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
quiet=False):
|
||||
'''
|
||||
Do the DRY thing and only call subprocess.Popen() once
|
||||
'''
|
||||
ret = {}
|
||||
if not quiet:
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
proc = subprocess.Popen(cmd,
|
||||
cwd=cwd,
|
||||
shell=True,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
)
|
||||
out = proc.communicate()
|
||||
ret['stdout'] = out[0]
|
||||
ret['stderr'] = out[1]
|
||||
ret['retcode'] = proc.returncode
|
||||
ret['pid'] = proc.pid
|
||||
|
||||
return ret
|
||||
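A compact, standalone sketch of the DRY pattern above: one Popen wrapper and thin public functions that just pick a key out of its return dict (names mirror the module, but the snippet is illustrative only):

import subprocess

def _run(cmd, cwd='.', stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    # The single place that touches subprocess.Popen.
    proc = subprocess.Popen(cmd, cwd=cwd, shell=True, stdout=stdout, stderr=stderr)
    out, err = proc.communicate()
    return {'stdout': out, 'stderr': err, 'retcode': proc.returncode, 'pid': proc.pid}

def run(cmd, cwd='.'):
    # cmd.run merges stderr into stdout.
    return _run(cmd, cwd=cwd, stderr=subprocess.STDOUT)['stdout']

def run_all(cmd, cwd='.'):
    return _run(cmd, cwd=cwd)

print(run('echo hello'))  # prints 'hello' (as bytes under Python 3)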
|
||||
|
||||
def _is_exec(path):
|
||||
def _run_quiet(cmd, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
Return true if the passed path exists and is execuatable
|
||||
Helper for running commands quietly for minion startup
|
||||
'''
|
||||
return os.path.exists(path) and os.access(path, os.X_OK)
|
||||
return _run(cmd, cwd, stderr=subprocess.STDOUT, quiet=True)['stdout']
|
||||
|
||||
|
||||
def run(cmd, cwd=DEFAULT_CWD):
|
||||
|
@ -36,14 +61,9 @@ def run(cmd, cwd=DEFAULT_CWD):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
|
||||
salt '*' cmd.run "ls -l | awk '/foo/{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT).communicate()[0]
|
||||
out = _run(cmd, cwd=cwd, stderr=subprocess.STDOUT)['stdout']
|
||||
log.debug(out)
|
||||
return out
|
||||
|
||||
|
@ -54,13 +74,9 @@ def run_stdout(cmd, cwd=DEFAULT_CWD):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
|
||||
salt '*' cmd.run_stdout "ls -l | awk '/foo/{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
stdout = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
stdout = _run(cmd, cwd=cwd)["stdout"]
|
||||
log.debug(stdout)
|
||||
return stdout
|
||||
|
||||
|
@ -71,13 +87,9 @@ def run_stderr(cmd, cwd=DEFAULT_CWD):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
|
||||
salt '*' cmd.run_stderr "ls -l | awk '/foo/{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
stderr = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stderr=subprocess.PIPE).communicate()[0]
|
||||
stderr = _run(cmd, cwd=cwd)["stderr"]
|
||||
log.debug(stderr)
|
||||
return stderr
|
||||
|
||||
|
@ -88,20 +100,9 @@ def run_all(cmd, cwd=DEFAULT_CWD):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' cmd.run_all "ls -l | grep foo | awk '{print $2}'"
|
||||
salt '*' cmd.run_all "ls -l | awk '/foo/{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
ret = {}
|
||||
proc = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
out = proc.communicate()
|
||||
ret['stdout'] = out[0]
|
||||
ret['stderr'] = out[1]
|
||||
ret['retcode'] = proc.returncode
|
||||
ret['pid'] = proc.pid
|
||||
ret = _run(cmd, cwd=cwd)
|
||||
if ret['retcode'] != 0:
|
||||
log.error('Command {0} failed'.format(cmd))
|
||||
log.error('stdout: {0}'.format(ret['stdout']))
|
||||
|
@ -130,16 +131,19 @@ def has_exec(cmd):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' cat
|
||||
salt '*' cmd.has_exec cat
|
||||
'''
|
||||
if cmd.startswith('/'):
|
||||
return _is_exec(cmd)
|
||||
for path in os.environ['PATH'].split(os.pathsep):
|
||||
fn_ = os.path.join(path, cmd)
|
||||
if _is_exec(fn_):
|
||||
return True
|
||||
return False
|
||||
return bool(salt.utils.which(cmd))
|
||||
|
||||
def which(cmd):
|
||||
'''
|
||||
Returns the path of an executable available on the minion, None otherwise
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' cmd.which cat
|
||||
'''
|
||||
return salt.utils.which(cmd)
|
||||
|
||||
def exec_code(lang, code, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
|
@ -151,10 +155,8 @@ def exec_code(lang, code, cwd=DEFAULT_CWD):
|
|||
|
||||
salt '*' cmd.exec_code ruby 'puts "cheese"'
|
||||
'''
|
||||
fd, cfn = tempfile.mkstemp()
|
||||
open(cfn, 'w+').write(code)
|
||||
return subprocess.Popen(lang + ' ' + cfn,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT).communicate()[0]
|
||||
fd, codefile = tempfile.mkstemp()
|
||||
open(codefile, 'w+').write(code)
|
||||
|
||||
cmd = '{0} {1}'.format(lang, codefile)
|
||||
return run(cmd, cwd=cwd)
|
||||
|
|
|
@ -43,6 +43,17 @@ def get_file(path, dest, env='base'):
|
|||
return client.get_file(path, dest, False, env)
|
||||
|
||||
|
||||
def get_url(path, dest, env='base'):
|
||||
'''
|
||||
Used to get a single file from a URL.
|
||||
CLI Examples::
|
||||
cp.get_url salt://my/file /tmp/mine
|
||||
cp.get_url http://www.slashdot.org /tmp/index.html
|
||||
'''
|
||||
client = salt.minion.FileClient(__opts__)
|
||||
return client.get_url(path, dest, False, env)
|
||||
|
||||
|
||||
def cache_file(path, env='base'):
|
||||
'''
|
||||
Used to cache a single file in the local salt-master file cache.
|
||||
|
|
|
@ -41,7 +41,7 @@ def _render_tab(lst):
|
|||
|
||||
def _write_cron(user, lines):
|
||||
'''
|
||||
Takes a list of lines to be commited to a user's crontab and writes it
|
||||
Takes a list of lines to be committed to a user's crontab and writes it
|
||||
'''
|
||||
tmpd, path = tempfile.mkstemp()
|
||||
open(path, 'w+').writelines(lines)
|
||||
|
@ -100,6 +100,9 @@ def list_tab(user):
|
|||
ret['pre'].append(line)
|
||||
return ret
|
||||
|
||||
# For consistency's sake
|
||||
ls = list_tab
|
||||
|
||||
|
||||
def set_special(user, special, cmd):
|
||||
'''
|
||||
|
@ -188,3 +191,5 @@ def rm_job(user, minute, hour, dom, month, dow, cmd):
|
|||
# Failed to commit, return the error
|
||||
return comdat['stderr']
|
||||
return ret
|
||||
|
||||
rm = rm_job
|
||||
|
|
|
@ -2,9 +2,6 @@
|
|||
Module for gathering disk information
|
||||
'''
|
||||
|
||||
# FIXME: we want module internal calls rather than using subprocess directly
|
||||
import subprocess
|
||||
|
||||
|
||||
def usage():
|
||||
'''
|
||||
|
@ -16,20 +13,49 @@ def usage():
|
|||
'''
|
||||
cmd = 'df -P'
|
||||
ret = {}
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if line.startswith('Filesystem'):
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = {
|
||||
'1K-blocks': comps[1],
|
||||
'available': comps[3],
|
||||
'capacity': comps[4],
|
||||
'mountpoint': comps[5],
|
||||
'used': comps[2]
|
||||
ret[comps[5]] = {
|
||||
'filesystem': comps[0],
|
||||
'1K-blocks': comps[1],
|
||||
'used': comps[2],
|
||||
'available': comps[3],
|
||||
'capacity': comps[4],
|
||||
}
|
||||
return ret
|
||||
|
||||
def inodeusage():
|
||||
'''
|
||||
Return inode usage information for volumes mounted on this minion
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' disk.inodeusage
|
||||
'''
|
||||
cmd = 'df -i'
|
||||
ret = {}
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if line.startswith('Filesystem'):
|
||||
continue
|
||||
comps = line.split()
|
||||
# Don't choke on empty lines
|
||||
if not comps:
|
||||
continue
|
||||
|
||||
try:
|
||||
ret[comps[5]] = {
|
||||
'inodes': comps[1],
|
||||
'used': comps[2],
|
||||
'free': comps[3],
|
||||
'use': comps[4],
|
||||
'filesystem': comps[0],
|
||||
}
|
||||
except IndexError:
|
||||
print "DEBUG: comps='%s'" % comps
|
||||
return ret
|
||||
|
|
|
@ -5,7 +5,7 @@ Support for Portage
|
|||
try:
|
||||
import portage
|
||||
except ImportError:
|
||||
None
|
||||
pass
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
|
@ -101,7 +101,7 @@ def install(pkg, refresh=False):
|
|||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
if old_pkgs.has_key(pkg):
|
||||
if pkg in old_pkgs:
|
||||
if old_pkgs[pkg] == new_pkgs[pkg]:
|
||||
continue
|
||||
else:
|
||||
|
@ -136,7 +136,7 @@ def update(pkg, refresh=False):
|
|||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
if old_pkgs.has_key(pkg):
|
||||
if pkg in old_pkgs:
|
||||
if old_pkgs[pkg] == new_pkgs[pkg]:
|
||||
continue
|
||||
else:
|
||||
|
@ -170,7 +170,7 @@ def upgrade(refresh=False):
|
|||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
if old_pkgs.has_key(pkg):
|
||||
if pkg in old_pkgs:
|
||||
if old_pkgs[pkg] == new_pkgs[pkg]:
|
||||
continue
|
||||
else:
|
||||
|
@ -200,7 +200,7 @@ def remove(pkg):
|
|||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in old_pkgs:
|
||||
if not new_pkgs.has_key(pkg):
|
||||
if not pkg in new_pkgs:
|
||||
ret_pkgs.append(pkg)
|
||||
|
||||
return ret_pkgs
|
||||
|
|
|
@ -44,7 +44,7 @@ def group_to_gid(group):
|
|||
|
||||
def get_gid(path):
|
||||
'''
|
||||
Return the user that owns a given file
|
||||
Return the id of the group that owns a given file
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -57,7 +57,7 @@ def get_gid(path):
|
|||
|
||||
def get_group(path):
|
||||
'''
|
||||
Return the user that owns a given file
|
||||
Return the group that owns a given file
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -85,7 +85,7 @@ def uid_to_user(uid):
|
|||
|
||||
def user_to_uid(user):
|
||||
'''
|
||||
Convert user name to a gid
|
||||
Convert user name to a uid
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -99,7 +99,7 @@ def user_to_uid(user):
|
|||
|
||||
def get_uid(path):
|
||||
'''
|
||||
Return the user that owns a given file
|
||||
Return the id of the user that owns a given file
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -142,7 +142,7 @@ def get_mode(path):
|
|||
|
||||
def set_mode(path, mode):
|
||||
'''
|
||||
Set the more of a file
|
||||
Set the mode of a file
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -208,7 +208,7 @@ def get_sum(path, form='md5'):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' /etc/passwd sha512
|
||||
salt '*' file.get_sum /etc/passwd sha512
|
||||
'''
|
||||
if not os.path.isfile(path):
|
||||
return 'File not found'
|
||||
|
@ -311,9 +311,9 @@ def find(path, *opts):
|
|||
|
||||
CLI Examples::
|
||||
|
||||
salt '*' / type=f name=\*.bak size=+10m
|
||||
salt '*' /var mtime=+30d size=+10m print=path,size,mtime
|
||||
salt '*' /var/log name=\*.[0-9] mtime=+30d size=+10m delete
|
||||
salt '*' file.find / type=f name=\*.bak size=+10m
|
||||
salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
|
||||
salt '*' file.find /var/log name=\*.[0-9] mtime=+30d size=+10m delete
|
||||
'''
|
||||
opts_dict = {}
|
||||
for opt in opts:
|
||||
|
@ -327,3 +327,169 @@ def find(path, *opts):
|
|||
ret = [p for p in f.find(path)]
|
||||
ret.sort()
|
||||
return ret
|
||||
|
||||
def _sed_esc(s):
|
||||
'''
|
||||
Escape single quotes and forward slashes
|
||||
'''
|
||||
return '{0}'.format(s).replace("'", "'\"'\"'").replace("/", "\/")
|
||||
|
||||
def sed(path, before, after, limit='', backup='.bak', options='-r -e',
|
||||
flags='g'):
|
||||
'''
|
||||
Make a simple edit to a file
|
||||
|
||||
Equivalent to::
|
||||
|
||||
sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
|
||||
|
||||
path
|
||||
The full path to the file to be edited
|
||||
before
|
||||
A pattern to find in order to replace with ``after``
|
||||
after
|
||||
Text that will replace ``before``
|
||||
limit : ``''``
|
||||
An initial pattern to search for before searching for ``before``
|
||||
backup : ``.bak``
|
||||
The file will be backed up before edit with this file extension;
|
||||
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called it will
|
||||
overwrite this backup
|
||||
options : ``-r -e``
|
||||
Options to pass to sed
|
||||
flags : ``g``
|
||||
Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
|
||||
matching
|
||||
|
||||
Forward slashes and single quotes will be escaped automatically in the
|
||||
``before`` and ``after`` patterns.
|
||||
|
||||
Usage::
|
||||
|
||||
salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
'''
|
||||
# Largely inspired by Fabric's contrib.files.sed()
|
||||
|
||||
before = _sed_esc(before)
|
||||
after = _sed_esc(after)
|
||||
|
||||
cmd = r"sed {backup}{options} '{limit}s/{before}/{after}/{flags}' {path}".format(
|
||||
backup = '-i{0} '.format(backup) if backup else '',
|
||||
options = options,
|
||||
limit = '/{0}/ '.format(limit) if limit else '',
|
||||
before = before,
|
||||
after = after,
|
||||
flags = flags,
|
||||
path = path)
|
||||
|
||||
return __salt__['cmd.run'](cmd)
|
||||
|
||||
def uncomment(path, regex, char='#', backup='.bak'):
|
||||
'''
|
||||
Uncomment specified commented lines in a file
|
||||
|
||||
path
|
||||
The full path to the file to be edited
|
||||
regex
|
||||
A regular expression used to find the lines that are to be uncommented
|
||||
char : ``#``
|
||||
The character to remove in order to uncomment a line; if a single
|
||||
whitespace character follows the comment it will also be removed
|
||||
backup : ``.bak``
|
||||
The file will be backed up before edit with this file extension;
|
||||
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called it will
|
||||
overwrite this backup
|
||||
|
||||
Usage::
|
||||
|
||||
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
'''
|
||||
# Largely inspired by Fabric's contrib.files.uncomment()
|
||||
|
||||
return __salt__['file.sed'](path,
|
||||
before=r'^([[:space:]]*){0}[[:space:]]?'.format(char),
|
||||
after=r'\1',
|
||||
limit=regex,
|
||||
backup=backup)
|
||||
|
||||
def comment(path, regex, char='#', backup='.bak'):
|
||||
'''
|
||||
Comment out specified lines in a file
|
||||
|
||||
path
|
||||
The full path to the file to be edited
|
||||
regex
|
||||
A regular expression used to find the lines that are to be commented;
|
||||
this pattern will be wrapped in parenthesis and will move any
|
||||
preceding/trailing ``^`` or ``$`` characters outside the parenthesis
|
||||
(e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
|
||||
char : ``#``
|
||||
The character to be inserted at the beginning of a line in order to
|
||||
comment it out
|
||||
backup : ``.bak``
|
||||
The file will be backed up before edit with this file extension;
|
||||
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called, this
backup will be overwritten
|
||||
|
||||
Usage::
|
||||
|
||||
salt '*' file.comment /etc/modules pcspkr
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
'''
|
||||
# Largely inspired by Fabric's contrib.files.comment()
|
||||
|
||||
regex = "{0}({1}){2}".format(
|
||||
'^' if regex.startswith('^') else '',
|
||||
regex.lstrip('^').rstrip('$'),
|
||||
'$' if regex.endswith('$') else '')
|
||||
|
||||
return __salt__['file.sed'](
|
||||
path,
|
||||
before=regex,
|
||||
after=r'{0}\1'.format(char),
|
||||
backup=backup)
|
||||
|
||||
def contains(path, text, limit=''):
|
||||
'''
|
||||
Return True if the file at ``path`` contains ``text``
|
||||
|
||||
Usage::
|
||||
|
||||
salt '*' file.contains /etc/crontab 'mymaintenance.sh'
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
'''
|
||||
# Largely inspired by Fabric's contrib.files.contains()
|
||||
|
||||
if not os.path.exists(path):
|
||||
return False
|
||||
|
||||
result = __salt__['file.sed'](path, text, '&', limit=limit, backup='',
|
||||
options='-n -r -e', flags='gp')
|
||||
|
||||
return bool(result)
|
||||
|
||||
def append(path, *args):
|
||||
'''
|
||||
Append text to the end of a file
|
||||
|
||||
Usage::
|
||||
|
||||
salt '*' file.append /etc/motd \\
|
||||
"With all thine offerings thou shalt offer salt."\\
|
||||
"Salt is what makes things taste bad when it isn't in them."
|
||||
|
||||
.. versionadded:: 0.9.5
|
||||
'''
|
||||
# Largely inspired by Fabric's contrib.files.append()
|
||||
|
||||
with open(path, "a") as f:
|
||||
for line in args:
|
||||
f.write('{0}\n'.format(line))
|
||||
|
||||
return "Wrote {0} lines to '{1}'".format(len(args), path)
|
||||
|
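The helpers above all funnel into one shell command: escape the patterns, then format a single sed invocation and hand it to cmd.run. A rough standalone sketch of just that escaping and command construction (the /tmp path and the final print are only illustrative; the module itself runs the string through cmd.run instead):

def _sed_esc(s):
    # Escape single quotes (for the surrounding shell quoting) and forward
    # slashes (the sed delimiter), mirroring the helper above.
    return str(s).replace("'", "'\"'\"'").replace("/", "\\/")

def build_sed_cmd(path, before, after, limit='', backup='.bak',
                  options='-r -e', flags='g'):
    # Assemble the one-liner that would be handed to the shell.
    return "sed {backup}{options} '{limit}s/{before}/{after}/{flags}' {path}".format(
        backup='-i{0} '.format(backup) if backup else '',
        options=options,
        limit='/{0}/ '.format(limit) if limit else '',
        before=_sed_esc(before),
        after=_sed_esc(after),
        flags=flags,
        path=path)

print(build_sed_cmd('/tmp/httpd.conf', 'LogLevel warn', 'LogLevel info'))
# sed -i.bak -r -e 's/LogLevel warn/LogLevel info/g' /tmp/httpd.conf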
|
salt/modules/freebsdkmod.py (new file, 127 lines)
|
@ -0,0 +1,127 @@
|
|||
'''
|
||||
Module to manage FreeBSD kernel modules
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only runs on FreeBSD systems
|
||||
'''
|
||||
return 'kmod' if __grains__['kernel'] == 'FreeBSD' else False
|
||||
|
||||
|
||||
def _new_mods(pre_mods, post_mods):
|
||||
'''
|
||||
Return a list of the new modules; pass a kldstat dict from before running
kldload and one from after it has run
|
||||
'''
|
||||
pre = set()
|
||||
post = set()
|
||||
for mod in pre_mods:
|
||||
pre.add(mod['module'])
|
||||
for mod in post_mods:
|
||||
post.add(mod['module'])
|
||||
return list(post.difference(pre))
|
||||
|
||||
|
||||
def _rm_mods(pre_mods, post_mods):
|
||||
'''
|
||||
Return a list of the removed modules; pass a kldstat dict from before
running kldunload and one from after it has run
|
||||
'''
|
||||
pre = set()
|
||||
post = set()
|
||||
for mod in pre_mods:
|
||||
pre.add(mod['module'])
|
||||
for mod in post_mods:
|
||||
post.add(mod['module'])
|
||||
return list(pre.difference(post))
|
||||
|
||||
|
||||
def available():
|
||||
'''
|
||||
Return a list of all available kernel modules
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' kmod.available
|
||||
'''
|
||||
ret = []
|
||||
for path in __salt__['cmd.run']('ls /boot/kernel | grep .ko$').split('\n'):
|
||||
bpath = os.path.basename(path)
|
||||
comps = bpath.split('.')
|
||||
if 'ko' in comps:
|
||||
# This is a kernel module, return it without the .ko extension
|
||||
ret.append('.'.join(comps[:comps.index('ko')]))
|
||||
return ret
|
||||
|
||||
|
||||
def check_available(mod):
|
||||
'''
|
||||
Check to see if the specified kernel module is available
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' kmod.check_available kvm
|
||||
'''
|
||||
if mod in available():
|
||||
# the module is available, return True
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def lsmod():
|
||||
'''
|
||||
Return a dict containing information about currently loaded modules
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' kmod.lsmod
|
||||
'''
|
||||
ret = []
|
||||
for line in __salt__['cmd.run']('kldstat').split('\n'):
|
||||
comps = line.split()
|
||||
if not len(comps) > 2:
|
||||
continue
|
||||
if comps[0] == 'Module':
|
||||
continue
|
||||
mdat = {}
|
||||
mdat['module'] = comps[0]
|
||||
mdat['size'] = comps[1]
|
||||
mdat['depcount'] = comps[2]
|
||||
if len(comps) > 3:
|
||||
mdat['deps'] = comps[3].split(',')
|
||||
else:
|
||||
mdat['deps'] = []
|
||||
ret.append(mdat)
|
||||
return ret
|
||||
|
||||
|
||||
def load(mod):
|
||||
'''
|
||||
Load the specified kernel module
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' kmod.load kvm
|
||||
'''
|
||||
pre_mods = lsmod()
|
||||
data = __salt__['cmd.run_all']('kldload {0}'.format(mod))
|
||||
post_mods = lsmod()
|
||||
return _new_mods(pre_mods, post_mods)
|
||||
|
||||
|
||||
def remove(mod):
|
||||
'''
|
||||
Remove the specified kernel module
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' kmod.remove kvm
|
||||
'''
|
||||
pre_mods = lsmod()
|
||||
data = __salt__['cmd.run_all']('kldunload {0}'.format(mod))
|
||||
post_mods = lsmod()
|
||||
return _rm_mods(pre_mods, post_mods)
|
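load() and remove() above work by diffing the module list taken before the kld command against the one taken after it. That set-difference step on its own, fed with made-up snapshots in the same shape lsmod() returns:

def _new_mods(pre_mods, post_mods):
    # Modules present after the command but not before it.
    pre = {mod['module'] for mod in pre_mods}
    post = {mod['module'] for mod in post_mods}
    return list(post - pre)

# Hypothetical before/after snapshots.
before = [{'module': 'kernel'}, {'module': 'if_em'}]
after = [{'module': 'kernel'}, {'module': 'if_em'}, {'module': 'kvm'}]
print(_new_mods(before, after))   # ['kvm']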
|
@ -71,7 +71,7 @@ def list_pkgs():
|
|||
|
||||
def refresh_db():
|
||||
'''
|
||||
Update the ports tree with portsnap. If the ports tre does not exist it
|
||||
Update the ports tree with portsnap. If the ports tree does not exist it
|
||||
will be downloaded and set up.
|
||||
|
||||
CLI Example::
|
||||
|
|
|
@ -5,6 +5,12 @@ Control aspects of the grains data
|
|||
# Seed the grains dict so cython will build
|
||||
__grains__ = {}
|
||||
|
||||
# Change the default outputter to make it more readable
|
||||
__outputter__ = {
|
||||
'item' : 'txt',
|
||||
'items': 'yaml',
|
||||
}
|
||||
|
||||
|
||||
def items():
|
||||
'''
|
||||
|
@ -17,7 +23,7 @@ def items():
|
|||
return __grains__
|
||||
|
||||
|
||||
def item(key):
|
||||
def item(key=None):
|
||||
'''
|
||||
Return a singe component of the grains data
|
||||
|
||||
|
@ -25,6 +31,14 @@ def item(key):
|
|||
|
||||
salt '*' grains.item os
|
||||
'''
|
||||
if key in __grains__:
|
||||
return __grains__[key]
|
||||
return ''
|
||||
return __grains__.get(key, '')
|
||||
|
||||
def ls():
|
||||
'''
|
||||
Return a list of all available grains
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' grains.ls
|
||||
'''
|
||||
return sorted(__grains__)
|
||||
|
|
|
@ -74,11 +74,11 @@ def getent():
|
|||
|
||||
def chgid(name, gid):
|
||||
'''
|
||||
Change the default shell of the user
|
||||
Change the gid for a named group
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' user.chshell foo /bin/zsh
|
||||
salt '*' group.chgid foo 4376
|
||||
'''
|
||||
pre_gid = __salt__['file.group_to_gid'](name)
|
||||
if gid == pre_gid:
|
||||
|
|
|
@ -15,7 +15,7 @@ def list_hosts():
|
|||
|
||||
salt '*' hosts.list_hosts
|
||||
'''
|
||||
hfn = '/etc/hosts'
|
||||
hfn = list_hosts.hosts_filename
|
||||
ret = {}
|
||||
if not os.path.isfile(hfn):
|
||||
return ret
|
||||
|
@ -26,8 +26,13 @@ def list_hosts():
|
|||
if line.startswith('#'):
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = comps[1:]
|
||||
if comps[0] in ret:
|
||||
# maybe log a warning ?
|
||||
ret[comps[0]].extend(comps[1:])
|
||||
else:
|
||||
ret[comps[0]] = comps[1:]
|
||||
return ret
|
||||
list_hosts.hosts_filename = '/etc/hosts'
|
||||
|
||||
|
||||
def get_ip(host):
|
||||
|
@ -42,7 +47,7 @@ def get_ip(host):
|
|||
return ''
|
||||
# Look for the op
|
||||
for addr in hosts:
|
||||
if hosts[addr].count(host):
|
||||
if host in hosts[addr]:
|
||||
return addr
|
||||
# ip not found
|
||||
return ''
|
||||
|
@ -71,20 +76,20 @@ def has_pair(ip, alias):
|
|||
hosts = list_hosts()
|
||||
if ip not in hosts:
|
||||
return False
|
||||
if hosts[ip].count(alias):
|
||||
if alias in hosts[ip]:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def set_host(ip, alias):
|
||||
'''
|
||||
Set the host entry in th hosts file for the given ip, this will overwrite
|
||||
Set the host entry in the hosts file for the given ip, this will overwrite
|
||||
any previous entry for the given ip
|
||||
|
||||
CLI Example::
|
||||
salt '*' hosts.set_host <ip> <alias>
|
||||
'''
|
||||
hfn = '/etc/hosts'
|
||||
hfn = set_host.hosts_filename
|
||||
ovr = False
|
||||
if not os.path.isfile(hfn):
|
||||
return False
|
||||
|
@ -97,13 +102,20 @@ def set_host(ip, alias):
|
|||
continue
|
||||
comps = tmpline.split()
|
||||
if comps[0] == ip:
|
||||
lines[ind] = ip + '\t\t' + alias + '\n'
|
||||
ovr = True
|
||||
if not ovr:
|
||||
lines[ind] = ip + '\t\t' + alias + '\n'
|
||||
ovr = True
|
||||
else: # remove other entries
|
||||
lines[ind] = ''
|
||||
if not ovr:
|
||||
# make sure there is a newline
|
||||
if lines and not lines[-1].endswith(('\n', '\r')):
|
||||
lines[-1] = '%s\n' % lines[-1]
|
||||
line = ip + '\t\t' + alias + '\n'
|
||||
lines.append(line)
|
||||
open(hfn, 'w+').writelines(lines)
|
||||
return True
|
||||
set_host.hosts_filename = '/etc/hosts'
|
||||
|
||||
|
||||
def rm_host(ip, alias):
|
||||
|
@ -115,7 +127,7 @@ def rm_host(ip, alias):
|
|||
'''
|
||||
if not has_pair(ip, alias):
|
||||
return True
|
||||
hfn = '/etc/hosts'
|
||||
hfn = rm_host.hosts_filename
|
||||
lines = open(hfn).readlines()
|
||||
for ind in range(len(lines)):
|
||||
tmpline = lines[ind].strip()
|
||||
|
@ -136,6 +148,7 @@ def rm_host(ip, alias):
|
|||
lines[ind] = newline
|
||||
open(hfn, 'w+').writelines(lines)
|
||||
return True
|
||||
rm_host.hosts_filename = '/etc/hosts'
|
||||
|
||||
|
||||
def add_host(ip, alias):
|
||||
|
@ -146,7 +159,7 @@ def add_host(ip, alias):
|
|||
CLI Example::
|
||||
salt '*' hosts.add_host <ip> <alias>
|
||||
'''
|
||||
hfn = '/etc/hosts'
|
||||
hfn = add_host.hosts_filename
|
||||
ovr = False
|
||||
if not os.path.isfile(hfn):
|
||||
return False
|
||||
|
@ -165,8 +178,14 @@ def add_host(ip, alias):
|
|||
newline += '\t' + alias
|
||||
lines.append(newline)
|
||||
ovr = True
|
||||
# leave any other matching entries alone
|
||||
break
|
||||
if not ovr:
|
||||
# make sure there is a newline
|
||||
if lines and not lines[-1].endswith(('\n', '\r')):
|
||||
lines[-1] = '%s\n' % lines[-1]
|
||||
line = ip + '\t\t' + alias + '\n'
|
||||
lines.append(line)
|
||||
open(hfn, 'w+').writelines(lines)
|
||||
return True
|
||||
add_host.hosts_filename = '/etc/hosts'
|
||||
|
|
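These hosts.py changes replace the hard-coded '/etc/hosts' with a function attribute (for example list_hosts.hosts_filename), so callers can point the module at a different file. A small sketch of that pattern in isolation, writing a throwaway hosts file first; the temporary file and its contents are invented for the example:

import os
import tempfile

def list_hosts():
    # Parse the hosts file named by the function attribute set below.
    ret = {}
    if not os.path.isfile(list_hosts.hosts_filename):
        return ret
    for line in open(list_hosts.hosts_filename):
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        comps = line.split()
        ret.setdefault(comps[0], []).extend(comps[1:])
    return ret

list_hosts.hosts_filename = '/etc/hosts'

# Point the function at a throwaway file, e.g. in a test.
with tempfile.NamedTemporaryFile('w', suffix='.hosts', delete=False) as tmp:
    tmp.write('127.0.0.1 localhost\n')
list_hosts.hosts_filename = tmp.name
print(list_hosts())   # {'127.0.0.1': ['localhost']}
os.unlink(tmp.name)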
|
@ -52,24 +52,21 @@ def available():
|
|||
for path in __salt__['cmd.run']('modprobe -l').split('\n'):
|
||||
bpath = os.path.basename(path)
|
||||
comps = bpath.split('.')
|
||||
if comps.count('ko'):
|
||||
if 'ko' in comps:
|
||||
# This is a kernel module, return it without the .ko extension
|
||||
ret.append('.'.join(comps[:comps.index('ko')]))
|
||||
return ret
|
||||
return sorted(list(ret))
|
||||
|
||||
|
||||
def check_available(mod):
|
||||
'''
|
||||
Check to see if the speciified kernel module is available
|
||||
Check to see if the specified kernel module is available
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' kmod.check_available kvm
|
||||
'''
|
||||
if available().count(mod):
|
||||
# the module is available, return True
|
||||
return True
|
||||
return False
|
||||
return mod in available()
|
||||
|
||||
|
||||
def lsmod():
|
||||
|
@ -87,10 +84,11 @@ def lsmod():
|
|||
continue
|
||||
if comps[0] == 'Module':
|
||||
continue
|
||||
mdat = {}
|
||||
mdat['module'] = comps[0]
|
||||
mdat['size'] = comps[1]
|
||||
mdat['depcount'] = comps[2]
|
||||
mdat = {
|
||||
'size': comps[1],
|
||||
'module': comps[0],
|
||||
'depcount': comps[2],
|
||||
}
|
||||
if len(comps) > 3:
|
||||
mdat['deps'] = comps[3].split(',')
|
||||
else:
|
||||
|
|
|
@ -24,9 +24,9 @@ def show():
|
|||
ret = {}
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if not line.count(' = '):
|
||||
if ' = ' not in line:
|
||||
continue
|
||||
comps = line.split(' = ')
|
||||
ret[comps[0]] = comps[1]
|
||||
|
@ -63,7 +63,7 @@ def assign(name, value):
|
|||
|
||||
def persist(name, value, config='/etc/sysctl.conf'):
|
||||
'''
|
||||
Assign and persist a simple sysctl paramater for this minion
|
||||
Assign and persist a simple sysctl parameter for this minion
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -80,7 +80,7 @@ def persist(name, value, config='/etc/sysctl.conf'):
|
|||
if line.startswith('#'):
|
||||
nlines.append(line)
|
||||
continue
|
||||
if not line.count('='):
|
||||
if '=' not in line:
|
||||
nlines.append(line)
|
||||
continue
|
||||
comps = line.split('=')
|
||||
|
|
|
@ -20,7 +20,7 @@ def dirinfo(path, opts=None):
|
|||
|
||||
output = out['stdout'].split('\n')
|
||||
for line in output:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split(':')
|
||||
ret[comps[0].strip()] = comps[1].strip()
|
||||
|
@ -42,7 +42,7 @@ def fileinfo(path):
|
|||
|
||||
output = out['stdout'].split('\n')
|
||||
for line in output:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if '/' in line:
|
||||
comps = line.split('/')
|
||||
|
@ -85,7 +85,7 @@ def mounts():
|
|||
|
||||
output = out['stdout'].split('\n')
|
||||
for line in output:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if 'fuse.mfs' in line:
|
||||
comps = line.split(' ')
|
||||
|
@ -130,7 +130,7 @@ def getgoal(path, opts=None):
|
|||
}
|
||||
else:
|
||||
for line in output:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if path in line:
|
||||
continue
|
||||
|
|
|
@ -113,7 +113,7 @@ def set_fstab(
|
|||
salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
|
||||
'''
|
||||
# Fix the opts type if it is a list
|
||||
if type(opts) == type(list()):
|
||||
if isinstance(opts, list):
|
||||
opts = ','.join(opts)
|
||||
lines = []
|
||||
change = False
|
||||
|
@ -189,7 +189,7 @@ def mount(name, device, mkmnt=False, fstype='', opts='defaults'):
|
|||
|
||||
salt '*' mount.mount /mnt/foo /dev/sdz1 True
|
||||
'''
|
||||
if type(opts) == type(str()):
|
||||
if isinstance(opts, basestring):
|
||||
opts = opts.split(',')
|
||||
if not os.path.exists(name) and mkmnt:
|
||||
os.makedirs(name)
|
||||
|
@ -212,7 +212,7 @@ def remount(name, device, mkmnt=False, fstype='', opts='defaults'):
|
|||
|
||||
salt '*' mount.remount /mnt/foo /dev/sdz1 True
|
||||
'''
|
||||
if type(opts) == type(str()):
|
||||
if isinstance(opts, basestring):
|
||||
opts = opts.split(',')
|
||||
mnts = active()
|
||||
if name in mnts:
|
||||
|
|
|
@ -17,16 +17,15 @@ import MySQLdb
|
|||
__opts__ = {}
|
||||
|
||||
|
||||
def connect():
|
||||
def connect(**kwargs):
|
||||
'''
|
||||
wrap authentication credentials here
|
||||
'''
|
||||
|
||||
hostname = __opts__['mysql.host']
|
||||
username = __opts__['mysql.user']
|
||||
password = __opts__['mysql.pass']
|
||||
dbport = __opts__['mysql.port']
|
||||
dbname = __opts__['mysql.db']
|
||||
hostname = kwargs.get('host', __opts__['mysql.host'])
|
||||
username = kwargs.get('user', __opts__['mysql.user'])
|
||||
password = kwargs.get('pass', __opts__['mysql.pass'])
|
||||
dbport = kwargs.get('port', __opts__['mysql.port'])
|
||||
dbname = kwargs.get('db', __opts__['mysql.db'])
|
||||
|
||||
db = MySQLdb.connect(
|
||||
hostname,
|
||||
|
@ -73,3 +72,67 @@ def version():
|
|||
cur.execute('SELECT VERSION()')
|
||||
row = cur.fetchone()
|
||||
return row
|
||||
|
||||
|
||||
def slave_lag():
|
||||
'''
|
||||
Return the number of seconds that a slave SQL server is lagging behind the
master. If the host is not a slave, -1 is returned. If the server is
configured as a slave but the slave IO thread is not running, -2 is
returned.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' mysql.slave_lag
|
||||
'''
|
||||
db = connect()
|
||||
cur = db.cursor(MySQLdb.cursors.DictCursor)
|
||||
cur.execute("show slave status")
|
||||
results = cur.fetchone()
|
||||
if cur.rowcount == 0:
|
||||
# Server is not a slave if master is not defined; return -1 in this
# case. Could probably check to see if Slave_IO_Running and
|
||||
# Slave_SQL_Running are both set to 'Yes' as well to be really really
|
||||
# sure that it is a slave.
|
||||
return -1
|
||||
else:
|
||||
if results['Slave_IO_Running'] == 'Yes':
|
||||
return results['Seconds_Behind_Master']
|
||||
else:
|
||||
# Replication is broken if you get here.
|
||||
return -2
|
||||
|
||||
|
||||
def free_slave():
|
||||
'''
|
||||
Frees a slave from its master. This is a WIP, do not use.
|
||||
'''
|
||||
slave_db = connect()
|
||||
slave_cur = slave_db.cursor(MySQLdb.cursors.DictCursor)
|
||||
slave_cur.execute("show slave status")
|
||||
slave_status = slave_cur.fetchone()
|
||||
master = {'host': slave_status['Master_Host']}
|
||||
|
||||
try:
|
||||
# Try to connect to the master and flush logs before promoting to
|
||||
# master. This may fail if the master is no longer available.
|
||||
# I am also assuming that the admin password is the same on both
|
||||
# servers here, and only overriding the host option in the connect
|
||||
# function.
|
||||
master_db = connect(**master)
|
||||
master_cur = master_db.cursor()
|
||||
master_cur.execute("flush logs")
|
||||
master_db.close()
|
||||
except MySQLdb.OperationalError:
|
||||
pass
|
||||
|
||||
slave_cur.execute("stop slave")
|
||||
slave_cur.execute("reset master")
|
||||
slave_cur.execute("change master to MASTER_HOST=''")
|
||||
slave_cur.execute("show slave status")
|
||||
results = slave_cur.fetchone()
|
||||
|
||||
if results is None:
|
||||
return 'promoted'
|
||||
else:
|
||||
return 'failed'
|
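connect() now takes keyword overrides that fall back to the mysql.* options, which is what lets free_slave() reach the master host with otherwise identical credentials. The override-with-fallback idea, sketched without MySQLdb and with invented option values:

# Stand-in for the minion's __opts__ dictionary (values are made up).
_opts = {
    'mysql.host': 'localhost',
    'mysql.user': 'salt',
    'mysql.pass': 'secret',
    'mysql.port': 3306,
    'mysql.db': 'salt',
}

def connection_args(**kwargs):
    # Each keyword argument overrides the corresponding configured default.
    return {
        'host': kwargs.get('host', _opts['mysql.host']),
        'user': kwargs.get('user', _opts['mysql.user']),
        'passwd': kwargs.get('pass', _opts['mysql.pass']),
        'port': kwargs.get('port', _opts['mysql.port']),
        'db': kwargs.get('db', _opts['mysql.db']),
    }

print(connection_args())                        # all defaults
print(connection_args(host='db-master.local'))  # only the host overridden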
|
@ -4,7 +4,13 @@ Module for gathering and managing network information
|
|||
|
||||
from string import ascii_letters, digits
|
||||
import socket
|
||||
import subprocess
|
||||
import salt.utils
|
||||
|
||||
__outputter__ = {
|
||||
'dig': 'txt',
|
||||
'ping': 'txt',
|
||||
'netstat': 'txt',
|
||||
}
|
||||
|
||||
|
||||
def _sanitize_host(host):
|
||||
|
@ -25,13 +31,10 @@ def ping(host):
|
|||
salt '*' network.ping archlinux.org -c 4
|
||||
'''
|
||||
cmd = 'ping -c 4 %s' % _sanitize_host(host)
|
||||
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
return out
|
||||
return __salt__['cmd.run'](cmd)
|
||||
|
||||
|
||||
# FIXME: Does not work with: netstat 1.42 (2001-04-15) from net-tools 1.6.0 (Ubuntu 10.10)
|
||||
def netstat():
|
||||
'''
|
||||
Return information on open ports and states
|
||||
|
@ -40,14 +43,10 @@ def netstat():
|
|||
|
||||
salt '*' network.netstat
|
||||
'''
|
||||
cmd = 'netstat -tulpnea'
|
||||
ret = []
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
cmd = 'netstat -tulpnea'
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
if line.startswith('tcp'):
|
||||
ret.append({
|
||||
|
@ -73,6 +72,8 @@ def netstat():
|
|||
return ret
|
||||
|
||||
|
||||
# FIXME: This is broken on: Modern traceroute for Linux, version 2.0.14, May 10 2010 (Ubuntu 10.10)
|
||||
# FIXME: traceroute is deprecated, make this fall back to tracepath
|
||||
def traceroute(host):
|
||||
'''
|
||||
Performs a traceroute to a 3rd party host
|
||||
|
@ -81,13 +82,12 @@ def traceroute(host):
|
|||
|
||||
salt '*' network.traceroute archlinux.org
|
||||
'''
|
||||
cmd = 'traceroute %s' % _sanitize_host(host)
|
||||
ret = []
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
cmd = 'traceroute %s' % _sanitize_host(host)
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not ' ' in line:
|
||||
continue
|
||||
if line.startswith('traceroute'):
|
||||
continue
|
||||
|
@ -115,11 +115,7 @@ def dig(host):
|
|||
salt '*' network.dig archlinux.org
|
||||
'''
|
||||
cmd = 'dig %s' % _sanitize_host(host)
|
||||
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
return out
|
||||
return __salt__['cmd.run'](cmd)
|
||||
|
||||
|
||||
def isportopen(host, port):
|
||||
|
@ -138,4 +134,3 @@ def isportopen(host, port):
|
|||
out = sock.connect_ex((_sanitize_host(host), int(port)))
|
||||
|
||||
return out
|
||||
|
||||
|
|
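Several of these network functions drop their subprocess.Popen boilerplate in favor of cmd.run, which hands back stdout as a single string, so line-oriented callers still need a split('\n') (added above for netstat and traceroute). A rough stand-in, assuming cmd.run amounts to a shell pipe:

import subprocess

def cmd_run(cmd):
    # Rough stand-in for Salt's cmd.run: run through a shell and return
    # stdout as one string, which callers split into lines themselves.
    return subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE).communicate()[0].decode()

for line in cmd_run('echo one; echo two').split('\n'):
    if not line:
        continue
    print(line)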
|
@ -62,7 +62,7 @@ def list_pkgs():
|
|||
ret = {}
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = comps[1]
|
||||
|
@ -88,9 +88,9 @@ def refresh_db():
|
|||
if not line:
|
||||
continue
|
||||
key = line.strip().split()[0]
|
||||
if line.count('is up to date'):
|
||||
if 'is up to date' in line:
|
||||
ret[key] = False
|
||||
elif line.count('downloading'):
|
||||
elif 'downloading' in line:
|
||||
ret[key] = True
|
||||
return ret
|
||||
|
||||
|
|
|
@ -49,6 +49,10 @@ def top(num_processes=5, interval=3):
|
|||
def get_pid_list():
|
||||
'''
|
||||
Return a list of process ids (PIDs) for all running processes.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.get_pid_list
|
||||
'''
|
||||
return psutil.get_pid_list()
|
||||
|
||||
|
@ -61,7 +65,11 @@ def cpu_percent(interval=0.1, per_cpu=False):
|
|||
the number of seconds to sample CPU usage over
|
||||
per_cpu
|
||||
if True return an array of CPU percent busy for each CPU, otherwise
|
||||
aggregate all precents into one number
|
||||
aggregate all percents into one number
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.cpu_percent
|
||||
'''
|
||||
if per_cpu:
|
||||
result = []
|
||||
|
@ -79,7 +87,11 @@ def cpu_times(per_cpu=False):
|
|||
|
||||
per_cpu
|
||||
if True return an array of percents for each CPU, otherwise aggregate
|
||||
all precents into one number
|
||||
all percents into one number
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.cpu_times
|
||||
'''
|
||||
if per_cpu:
|
||||
result = []
|
||||
|
@ -90,31 +102,47 @@ def cpu_times(per_cpu=False):
|
|||
return result
|
||||
|
||||
|
||||
def phymem_usage():
|
||||
def physical_memory_usage():
|
||||
'''
|
||||
Return a dict that describes free and available physical memory.
|
||||
|
||||
CLI Examples::
|
||||
|
||||
salt '*' ps.physical_memory_usage
|
||||
'''
|
||||
return dict(psutil.phymem_usage()._asdict())
|
||||
|
||||
|
||||
def virtmem_usage():
|
||||
def virtual_memory_usage():
|
||||
'''
|
||||
Return a dict that describes free and available memory, both physical
|
||||
and virtual.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' virtual_memory_usage
|
||||
'''
|
||||
return dict(psutil.virtmem_usage()._asdict())
|
||||
|
||||
|
||||
def cached_phymem():
|
||||
def cached_physical_memory():
|
||||
'''
|
||||
Return the amount cached memory.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.cached_physical_memory
|
||||
'''
|
||||
return psutil.cached_phymem()
|
||||
|
||||
|
||||
def phymem_buffers():
|
||||
def physical_memory_buffers():
|
||||
'''
|
||||
Return the amount of physical memory buffers.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.physical_memory_buffers
|
||||
'''
|
||||
return psutil.phymem_buffers()
|
||||
|
||||
|
@ -127,6 +155,10 @@ def disk_partitions(all=False):
|
|||
all
|
||||
if set to False, only return local, physical partitions (hard disk,
|
||||
USB, CD/DVD partitions). If True, return all filesystems.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.disk_partitions
|
||||
'''
|
||||
result = []
|
||||
for partition in psutil.disk_partitions(all):
|
||||
|
@ -138,6 +170,10 @@ def disk_usage(path):
|
|||
'''
|
||||
Given a path, return a dict listing the total available space as well as
|
||||
the free space, and used space.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.disk_usage /home
|
||||
'''
|
||||
return dict(psutil.disk_usage(path)._asdict())
|
||||
|
||||
|
@ -146,6 +182,10 @@ def disk_partition_usage(all=False):
|
|||
'''
|
||||
Return a list of disk partitions plus the mount point, filesystem and usage
|
||||
statistics.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.disk_partition_usage
|
||||
'''
|
||||
result = disk_partitions(all)
|
||||
for partition in result:
|
||||
|
@ -153,22 +193,34 @@ def disk_partition_usage(all=False):
|
|||
return result
|
||||
|
||||
|
||||
def TOTAL_PHYMEM():
|
||||
def total_physical_memory():
|
||||
'''
|
||||
Return the total number of bytes of physical memory.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.total_physical_memory
|
||||
'''
|
||||
return psutil.TOTAL_PHYMEM
|
||||
|
||||
|
||||
def NUM_CPUS():
|
||||
def num_cpus():
|
||||
'''
|
||||
Return the number of CPUs.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.num_cpus
|
||||
'''
|
||||
return psutil.NUM_CPUS
|
||||
|
||||
|
||||
def BOOT_TIME():
|
||||
def boot_time():
|
||||
'''
|
||||
Return the boot time in number of seconds since the epoch began.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' ps.boot_time
|
||||
'''
|
||||
return psutil.BOOT_TIME
|
||||
|
|
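The renamed ps functions mostly wrap psutil calls and convert the library's namedtuple results to plain dicts via _asdict() so the data serializes cleanly. The conversion on its own, using a stand-in namedtuple with invented values rather than a live psutil call:

from collections import namedtuple

# Shaped like the values psutil's usage functions return (numbers invented).
Usage = namedtuple('Usage', ('total', 'used', 'free', 'percent'))
sample = Usage(total=8 * 1024 ** 3, used=3 * 1024 ** 3,
               free=5 * 1024 ** 3, percent=37.5)

# namedtuple -> dict, the same dict(x._asdict()) step used above.
print(dict(sample._asdict()))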
|
@ -5,6 +5,7 @@ Publish a command from a minion to a target
|
|||
import zmq
|
||||
|
||||
import salt.crypt
|
||||
import salt.payload
|
||||
|
||||
|
||||
def _get_socket():
|
||||
|
@ -35,6 +36,7 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner=''):
|
|||
|
||||
salt system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
|
||||
'''
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
if fun == 'publish.publish':
|
||||
# Need to log something here
|
||||
return {}
|
||||
|
@ -55,5 +57,5 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner=''):
|
|||
'id': __opts__['id']}
|
||||
payload['load'] = auth.crypticle.dumps(load)
|
||||
socket = _get_socket()
|
||||
socket.send_pyobj(payload)
|
||||
return auth.crypticle.loads(socket.recv_pyobj())
|
||||
socket.send(serial.dumps(payload))
|
||||
return auth.crypticle.loads(serial.loads(socket.recv()))
|
||||
|
|
|
@ -73,11 +73,11 @@ def getent():
|
|||
|
||||
def chgid(name, gid):
|
||||
'''
|
||||
Change the default shell of the user
|
||||
Change the gid for a named group
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' user.chshell foo /bin/zsh
|
||||
salt '*' group.chgid foo 4376
|
||||
'''
|
||||
pre_gid = __salt__['file.group_to_gid'](name)
|
||||
if gid == pre_gid:
|
||||
|
|
|
@ -27,7 +27,7 @@ def add(name,
|
|||
|
||||
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
|
||||
'''
|
||||
if type(groups) == type(str()):
|
||||
if isinstance(groups, basestring):
|
||||
groups = groups.split(',')
|
||||
cmd = 'pw useradd -s {0} '.format(shell)
|
||||
if uid:
|
||||
|
@ -169,7 +169,7 @@ def chgroups(name, groups, append=False):
|
|||
|
||||
salt '*' user.chgroups foo wheel,root True
|
||||
'''
|
||||
if type(groups) == type(str()):
|
||||
if isinstance(groups, basestring):
|
||||
groups = groups.split(',')
|
||||
ugrps = set(list_groups(name))
|
||||
if ugrps == set(groups):
|
||||
|
@ -206,7 +206,7 @@ def info(name):
|
|||
|
||||
def list_groups(name):
|
||||
'''
|
||||
Return a list of groups the named user belings to
|
||||
Return a list of groups the named user belongs to
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
|
salt/modules/salt.py (new file, 125 lines)
|
@ -0,0 +1,125 @@
|
|||
'''
|
||||
The Salt module is used to manage the state of the salt minion itself. It is
|
||||
used to manage minion modules as well as automate updates to the salt minion
|
||||
'''
|
||||
|
||||
import os
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def _sync(form, env):
|
||||
'''
|
||||
Sync the given directory in the given environment
|
||||
'''
|
||||
ret = []
|
||||
source = os.path.join('salt://{0}'.format(env), '_{0}'.format(form))
|
||||
mod_dir = os.path.join(__opts__['extention_modules'], '{0}'.format(form))
|
||||
if not os.path.isdir(mod_dir):
|
||||
# Specified a non existing module type
|
||||
log.error('Failed to sync {0}: {1} is not a directory'.format(form, mod_dir))
|
||||
return ret
|
||||
for fn_ in __salt__['cp.cache_dir'](source, env):
|
||||
dest = os.path.join(mod_dir,
|
||||
os.path.basename(fn_)
|
||||
)
|
||||
if os.path.isfile(dest):
|
||||
# The file is present; if the checksum differs, replace it
|
||||
srch = hashlib.md5(open(fn_, 'r').read()).hexdigest()
|
||||
dsth = hashlib.md5(open(dest, 'r').read()).hexdigest()
|
||||
if srch != dsth:
|
||||
# The downloaded file differs, replace!
|
||||
shutil.copy(fn_, dest)
|
||||
ret.append('{0}.{1}'.format(form, os.path.basename(fn_)))
|
||||
else:
|
||||
shutil.copy(fn_, dest)
|
||||
ret.append('{0}.{1}'.format(form, os.path.basename(fn_)))
|
||||
return ret
|
||||
|
||||
|
||||
def sync_modules(env='base'):
|
||||
'''
|
||||
Sync the modules from the _modules directory on the salt master file
|
||||
server. This function is environment aware, pass the desired environment
|
||||
to grab the contents of the _modules directory, base is the default
|
||||
environment.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' salt.sync_modules
|
||||
'''
|
||||
return _sync('modules', env)
|
||||
|
||||
|
||||
def sync_states(env='base'):
|
||||
'''
|
||||
Sync the states from the _states directory on the salt master file
|
||||
server. This function is environment aware, pass the desired environment
|
||||
to grab the contents of the _states directory, base is the default
|
||||
environment.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' salt.sync_states
|
||||
'''
|
||||
return _sync('states', env)
|
||||
|
||||
|
||||
def sync_grains(env='base'):
|
||||
'''
|
||||
Sync the grains from the _grains directory on the salt master file
|
||||
server. This function is environment aware, pass the desired environment
|
||||
to grab the contents of the _grains directory, base is the default
|
||||
environment.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' salt.sync_grains
|
||||
'''
|
||||
return _sync('grains', env)
|
||||
|
||||
|
||||
def sync_renderers(env='base'):
|
||||
'''
|
||||
Sync the renderers from the _renderers directory on the salt master file
|
||||
server. This function is environment aware, pass the desired environment
|
||||
to grab the contents of the _renderers directory, base is the default
|
||||
environment.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' salt.sync_renderers
|
||||
'''
|
||||
return _sync('renderers', env)
|
||||
|
||||
|
||||
def sync_returners(env='base'):
|
||||
'''
|
||||
Sync the returners from the _returners directory on the salt master file
|
||||
server. This function is environment aware, pass the desired environment
|
||||
to grab the contents of the _returners directory, base is the default
|
||||
environment.
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' salt.sync_returners
|
||||
'''
|
||||
return _sync('returners', env)
|
||||
|
||||
|
||||
def sync_all(env='base'):
|
||||
'''
|
||||
Sync down all of the dynamic modules from the file server for a specific
|
||||
environment
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' salt.sync_all
|
||||
'''
|
||||
ret = []
|
||||
ret.append(sync_modules(env))
|
||||
ret.append(sync_states(env))
|
||||
ret.append(sync_grains(env))
|
||||
ret.append(sync_renderers(env))
|
||||
ret.append(sync_returners(env))
|
||||
return ret
|
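_sync() above decides whether to copy a cached file by comparing MD5 digests of the cached copy and the installed one. That compare-then-copy step in isolation; the commented paths are hypothetical:

import hashlib
import shutil

def copy_if_changed(src, dest):
    # Copy src over dest only when the MD5 digests differ.
    srch = hashlib.md5(open(src, 'rb').read()).hexdigest()
    dsth = hashlib.md5(open(dest, 'rb').read()).hexdigest()
    if srch != dsth:
        shutil.copy(src, dest)
        return True
    return False

# Hypothetical cached module and its installed location:
# copy_if_changed('/var/cache/salt/minion/files/base/_modules/foo.py',
#                 '/var/cache/salt/minion/extmods/modules/foo.py')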
|
@ -12,6 +12,7 @@ grainmap = {
|
|||
'RedHat': '/etc/init.d',
|
||||
'Ubuntu': '/etc/init.d',
|
||||
'Gentoo': '/etc/init.d',
|
||||
'CentOS': '/etc/init.d',
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ def info(name):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' shadow.user root
|
||||
salt '*' shadow.info root
|
||||
'''
|
||||
try:
|
||||
data = spwd.getspnam(name)
|
||||
|
@ -46,7 +46,7 @@ def set_password(name, password):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' root $1$UYCIxa628.9qXjpQCjM4a..
|
||||
salt '*' shadow.set_password root $1$UYCIxa628.9qXjpQCjM4a..
|
||||
'''
|
||||
s_file = '/etc/shadow'
|
||||
ret = {}
|
||||
|
|
File diff suppressed because it is too large
|
@ -12,9 +12,9 @@ def _refine_enc(enc):
|
|||
'''
|
||||
rsa = ['r', 'rsa', 'ssh-rsa']
|
||||
dss = ['d', 'dsa', 'dss', 'ssh-dss']
|
||||
if rsa.count(enc):
|
||||
if enc in rsa:
|
||||
return 'ssh-rsa'
|
||||
elif dss.count(enc):
|
||||
elif enc in dss:
|
||||
return 'ssh-dss'
|
||||
else:
|
||||
return 'ssh-rsa'
|
||||
|
@ -86,7 +86,7 @@ def host_keys(keydir=None):
|
|||
# Set up the default keydir - needs to support sshd_config parsing in the
|
||||
# future
|
||||
if not keydir:
|
||||
if __grains__['Linux']:
|
||||
if __grains__['kernel'] == 'Linux':
|
||||
keydir = '/etc/ssh'
|
||||
keys = {}
|
||||
for fn_ in os.listdir(keydir):
|
||||
|
|
|
@ -2,11 +2,15 @@
|
|||
Control the state system on the minion
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
import salt.state
|
||||
|
||||
|
||||
__outputter__ = {
|
||||
'highstate': 'highstate',
|
||||
'sls': 'highstate',
|
||||
'top': 'highstate',
|
||||
}
|
||||
|
||||
|
||||
|
@ -75,9 +79,36 @@ def highstate():
|
|||
return st_.call_highstate()
|
||||
|
||||
|
||||
def sls(mods, env='base'):
|
||||
'''
|
||||
Execute a set list of state modules from an environment, default
|
||||
environment is base
|
||||
|
||||
CLI Example:
|
||||
|
||||
salt '*' state.sls core,edit.vim dev
|
||||
'''
|
||||
st_ = salt.state.HighState(__opts__)
|
||||
if isinstance(mods, str):
|
||||
mods = mods.split(',')
|
||||
high, errors = st_.render_highstate({env: mods})
|
||||
if errors:
|
||||
return errors
|
||||
return st_.state.call_high(high)
|
||||
|
||||
|
||||
def top(topfn):
|
||||
'''
|
||||
Execute a specific top file instead of the default
|
||||
'''
|
||||
st_ = salt.state.HighState(__opts__)
|
||||
st_.opts['state_top'] = os.path.join('salt://', topfn)
|
||||
return st_.call_highstate()
|
||||
|
||||
|
||||
def show_highstate():
|
||||
'''
|
||||
Retrive the highstate data from the salt master and display it
|
||||
Retrieve the highstate data from the salt master and display it
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
|
|
@ -6,8 +6,6 @@ These data can be useful for compiling into stats later.
|
|||
import fnmatch
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
|
||||
__opts__ = {}
|
||||
|
||||
|
@ -71,8 +69,7 @@ def uptime():
|
|||
|
||||
salt '*' status.uptime
|
||||
'''
|
||||
return subprocess.Popen(['uptime'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
return __salt__['cmd.run']('uptime').strip()
|
||||
|
||||
|
||||
def loadavg():
|
||||
|
@ -107,7 +104,7 @@ def cpustats():
|
|||
stats = open(procf, 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
if comps[0] == 'cpu':
|
||||
|
@ -144,7 +141,7 @@ def meminfo():
|
|||
stats = open(procf, 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
comps[0] = comps[0].replace(':', '')
|
||||
|
@ -170,7 +167,7 @@ def cpuinfo():
|
|||
stats = open(procf, 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split(':')
|
||||
comps[0] = comps[0].strip()
|
||||
|
@ -195,7 +192,7 @@ def diskstats():
|
|||
stats = open(procf, 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[2]] = {'major': _number(comps[0]),
|
||||
|
@ -285,7 +282,7 @@ def vmstats():
|
|||
stats = open(procf, 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = _number(comps[1])
|
||||
|
@ -307,7 +304,7 @@ def netstats():
|
|||
ret = {}
|
||||
headers = ['']
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
comps = line.split()
|
||||
if comps[0] == headers[0]:
|
||||
|
@ -339,7 +336,7 @@ def netdev():
|
|||
stats = open(procf, 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if line.find(':') < 0:
|
||||
continue
|
||||
|
@ -376,12 +373,10 @@ def w():
|
|||
|
||||
salt '*' status.w
|
||||
'''
|
||||
users = subprocess.Popen(['w -h'],
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
user_list = []
|
||||
users = __salt__['cmd.run']('w -h').split('\n')
|
||||
for row in users:
|
||||
if not row.count(' '):
|
||||
if not row:
|
||||
continue
|
||||
comps = row.split()
|
||||
rec = {'idle': comps[3],
|
||||
|
@ -397,7 +392,7 @@ def w():
|
|||
|
||||
def all_status():
|
||||
'''
|
||||
Return a composite of all status data and info for this minoon.
|
||||
Return a composite of all status data and info for this minion.
|
||||
Warning: There is a LOT here!
|
||||
|
||||
CLI Example::
|
||||
|
|
salt/modules/systemd.py (new file, 54 lines)
|
@ -0,0 +1,54 @@
|
|||
'''
|
||||
Provide the service module for systemd
|
||||
'''
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only work on systems which default to systemd
|
||||
'''
|
||||
if __grains__['os'] == 'Fedora' and __grains__['osrelease'] > 15:
|
||||
return 'service'
|
||||
return False
|
||||
|
||||
def start(name):
|
||||
'''
|
||||
Start the specified service with systemd
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' service.start <service name>
|
||||
'''
|
||||
cmd = 'systemctl start {0}.service'.format(name)
|
||||
return not __salt__['cmd.retcode'](cmd)
|
||||
|
||||
def stop(name):
|
||||
'''
|
||||
Stop the specified service with systemd
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' service.stop <service name>
|
||||
'''
|
||||
cmd = 'systemctl stop {0}.service'.format(name)
|
||||
return not __salt__['cmd.retcode'](cmd)
|
||||
|
||||
def restart(name):
|
||||
'''
|
||||
Restart the specified service with systemd
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' service.restart <service name>
|
||||
'''
|
||||
cmd = 'systemctl restart {0}.service'.format(name)
|
||||
return not __salt__['cmd.retcode'](cmd)
|
||||
|
||||
def status(name):
|
||||
'''
|
||||
Return the status for a service via systemd, returns the PID if the service
|
||||
is running or an empty string if the service is not running
|
||||
'''
|
||||
cmd = ("systemctl restart {0}.service"
|
||||
" | awk '/Main PID/{print $3}'").format(name)
|
||||
return __salt__['cmd.run'](cmd).strip()
|
||||
|
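start, stop and restart above all reduce to running systemctl and negating the exit code, since a zero return code means success. The retcode-to-bool step in a standalone sketch (systemctl itself is assumed but not required to run the example):

import subprocess

def _systemctl(action, name):
    # A command's exit status is 0 on success, so `not retcode` yields True.
    cmd = 'systemctl {0} {1}.service'.format(action, name)
    return not subprocess.call(cmd, shell=True)

if __name__ == '__main__':
    # Substitute any command; `true` always exits 0, `false` exits 1.
    print(not subprocess.call('true', shell=True))    # True
    print(not subprocess.call('false', shell=True))   # False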
|
@ -38,6 +38,17 @@ def ping():
|
|||
'''
|
||||
return True
|
||||
|
||||
def version():
|
||||
'''
|
||||
Return the version of salt on the minion
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' test.version
|
||||
'''
|
||||
import salt
|
||||
return salt.__version__
|
||||
|
||||
|
||||
def conf_test():
|
||||
'''
|
||||
|
|
|
@ -27,7 +27,7 @@ def version():
|
|||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
ret = out[0].split(': ')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if 'Server version' in line:
|
||||
comps = line.split(': ')
|
||||
|
@ -46,7 +46,7 @@ def fullversion():
|
|||
ret = {}
|
||||
out = __salt__['cmd.run'](cmd).split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
if not line:
|
||||
continue
|
||||
if ': ' in line:
|
||||
comps = line.split(': ')
|
||||
|
|
|
@ -26,7 +26,7 @@ def add(name,
|
|||
|
||||
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
|
||||
'''
|
||||
if type(groups) == type(str()):
|
||||
if isinstance(groups, basestring):
|
||||
groups = groups.split(',')
|
||||
cmd = 'useradd -s {0} '.format(shell)
|
||||
if uid:
|
||||
|
@ -170,7 +170,7 @@ def chgroups(name, groups, append=False):
|
|||
|
||||
salt '*' user.chgroups foo wheel,root True
|
||||
'''
|
||||
if type(groups) == type(str()):
|
||||
if isinstance(groups, basestring):
|
||||
groups = groups.split(',')
|
||||
ugrps = set(list_groups(name))
|
||||
if ugrps == set(groups):
|
||||
|
@ -194,20 +194,29 @@ def info(name):
|
|||
salt '*' user.info root
|
||||
'''
|
||||
ret = {}
|
||||
data = pwd.getpwnam(name)
|
||||
ret['gid'] = data.pw_gid
|
||||
ret['groups'] = list_groups(name)
|
||||
ret['home'] = data.pw_dir
|
||||
ret['name'] = data.pw_name
|
||||
ret['passwd'] = data.pw_passwd
|
||||
ret['shell'] = data.pw_shell
|
||||
ret['uid'] = data.pw_uid
|
||||
try:
|
||||
data = pwd.getpwnam(name)
|
||||
ret['gid'] = data.pw_gid
|
||||
ret['groups'] = list_groups(name)
|
||||
ret['home'] = data.pw_dir
|
||||
ret['name'] = data.pw_name
|
||||
ret['passwd'] = data.pw_passwd
|
||||
ret['shell'] = data.pw_shell
|
||||
ret['uid'] = data.pw_uid
|
||||
except KeyError:
|
||||
ret['gid'] = ''
|
||||
ret['groups'] = ''
|
||||
ret['home'] = ''
|
||||
ret['name'] = ''
|
||||
ret['passwd'] = ''
|
||||
ret['shell'] = ''
|
||||
ret['uid'] = ''
|
||||
return ret
|
||||
|
||||
|
||||
def list_groups(name):
|
||||
'''
|
||||
Return a list of groups the named user belings to
|
||||
Return a list of groups the named user belongs to
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ def _get_dom(vm_):
|
|||
Return a domain object for the named vm
|
||||
'''
|
||||
conn = __get_conn()
|
||||
if not list_vms().count(vm_):
|
||||
if vm_ not in list_vms():
|
||||
raise Exception('The specified vm is not present')
|
||||
return conn.lookupByName(vm_)
|
||||
|
||||
|
@ -169,12 +169,12 @@ def get_disks(vm_):
|
|||
target = targets[0]
|
||||
else:
|
||||
continue
|
||||
if target.attributes.keys().count('dev')\
|
||||
and source.attributes.keys().count('file'):
|
||||
disks[target.getAttribute('dev')] =\
|
||||
if 'dev' in target.attributes.keys() \
|
||||
and 'file' in source.attributes.keys():
|
||||
disks[target.getAttribute('dev')] = \
|
||||
{'file': source.getAttribute('file')}
|
||||
for dev in disks:
|
||||
disks[dev].update(yaml.safe_load(subprocess.Popen('qemu-img info '\
|
||||
disks[dev].update(yaml.safe_load(subprocess.Popen('qemu-img info ' \
|
||||
+ disks[dev]['file'],
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0]))
|
||||
|
@ -405,6 +405,33 @@ def seed_non_shared_migrate(disks, force=False):
|
|||
return True
|
||||
|
||||
|
||||
def set_autostart(vm_, state='on'):
|
||||
'''
|
||||
Set the autostart flag on a VM so that the VM will start with the host
|
||||
system on reboot.
|
||||
|
||||
CLI Example::
|
||||
salt "*" virt.enable_autostart <vm name> <on | off>
|
||||
'''
|
||||
|
||||
dom = _get_dom(vm_)
|
||||
|
||||
if state == 'on':
|
||||
if dom.setAutostart(1) == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
elif state == 'off':
|
||||
if dom.setAutostart(0) == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
else:
|
||||
# return False if state is set to something other than on or off
|
||||
return False
|
||||
|
||||
def destroy(vm_):
|
||||
'''
|
||||
Hard power down the virtual machine, this is equivalent to pulling the
|
||||
|
@ -443,7 +470,7 @@ def purge(vm_, dirs=False):
|
|||
'''
|
||||
Recursively destroy and delete a virtual machine, pass True for dir's to
|
||||
also delete the directories containing the virtual machine disk images -
|
||||
USE WITH EXTREAME CAUTION!
|
||||
USE WITH EXTREME CAUTION!
|
||||
|
||||
CLI Example::
|
||||
|
||||
|
@ -482,11 +509,9 @@ def is_kvm_hyper():
|
|||
'''
|
||||
if __grains__['virtual'] != 'physical':
|
||||
return False
|
||||
if not open('/proc/modules').read().count('kvm_'):
|
||||
if 'kvm_' not in open('/proc/modules').read():
|
||||
return False
|
||||
libvirt_ret = subprocess.Popen('ps aux',
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].count('libvirtd')
|
||||
libvirt_ret = __salt__['cmd.run'](__grains__['ps']).count('libvirtd')
|
||||
if not libvirt_ret:
|
||||
return False
|
||||
return True
|
||||
|
|
|
@ -3,16 +3,29 @@ Support for YUM
|
|||
'''
|
||||
import yum
|
||||
import rpm
|
||||
import logging
|
||||
from rpmUtils.arch import getBaseArch
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Confine this module to yum based systems
|
||||
'''
|
||||
# We don't need to support pre-yum OSes because they don't support
|
||||
# python <= 2.6
|
||||
dists = 'CentOS Scientific RedHat Fedora'
|
||||
return 'pkg' if dists.count(__grains__['os']) else False
|
||||
# Return this for pkg on RHEL/Fedora based distros that ship with python
|
||||
# 2.6 or greater.
|
||||
dists = ('CentOS', 'Scientific', 'RedHat')
|
||||
if __grains__['os'] == 'Fedora':
|
||||
if int(__grains__['osrelease'].split('.')[0]) >= 11:
|
||||
return 'pkg'
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
if __grains__['os'] in dists:
|
||||
if int(__grains__['osrelease'].split('.')[0]) >= 6:
|
||||
return 'pkg'
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def _list_removed(old, new):
|
||||
|
@ -23,14 +36,14 @@ def _list_removed(old, new):
|
|||
for pkg in old:
|
||||
if pkg not in new:
|
||||
pkgs.append(pkg)
|
||||
|
||||
|
||||
return pkgs
|
||||
|
||||
|
||||
def _compare_versions(old, new):
|
||||
'''
|
||||
Returns a dict that that displays old and new versions for a package after
|
||||
install/upgrade of package.
|
||||
install/upgrade of package.
|
||||
'''
|
||||
pkgs = {}
|
||||
for npkg in new:
|
||||
|
@ -56,14 +69,14 @@ def available_version(name):
|
|||
CLI Example::
|
||||
|
||||
salt '*' pkg.available_version <package name>
|
||||
'''
|
||||
yb = yum.YumBase()
|
||||
# look for available packages only, if package is already installed with
|
||||
'''
|
||||
yb = yum.YumBase()
|
||||
# look for available packages only, if package is already installed with
|
||||
# latest version it will not show up here. If we want to use wildcards
|
||||
# here we can, but for now its exactmatch only.
|
||||
# here we can, but for now its exact match only.
|
||||
versions_list = []
|
||||
for pkgtype in ['available', 'updates']:
|
||||
|
||||
|
||||
pl = yb.doPackageLists(pkgtype)
|
||||
exactmatch, matched, unmatched = yum.packages.parsePackages(pl, [name])
|
||||
# build a list of available packages from either available or updates
|
||||
|
@ -73,17 +86,17 @@ def available_version(name):
|
|||
for pkg in exactmatch:
|
||||
if pkg.arch == getBaseArch():
|
||||
versions_list.append('-'.join([pkg.version, pkg.release]))
|
||||
|
||||
|
||||
if len(versions_list) == 0:
|
||||
# if versions_list is empty return empty string. It may make sense
|
||||
# to also check if a package is installed and on latest version
|
||||
# already and return a message saying 'up to date' or something along
|
||||
# those lines.
|
||||
return ''
|
||||
|
||||
|
||||
# remove the duplicate items from the list and return the first one
|
||||
return list(set(versions_list))[0]
|
||||
|
||||
|
||||
|
||||
def version(name):
|
||||
'''
|
||||
|
@ -121,7 +134,7 @@ def list_pkgs(*args):
|
|||
for arg in args:
|
||||
for h in ts.dbMatch('name', arg):
|
||||
pkgs[h['name']] = '-'.join([h['version'],h['release']])
|
||||
|
||||
|
||||
return pkgs
|
||||
|
||||
|
||||
|
@ -137,7 +150,7 @@ def refresh_db():
|
|||
yb = yum.YumBase()
|
||||
yb.cleanMetadata()
|
||||
return True
|
||||
|
||||
|
||||
|
||||
def clean_metadata():
|
||||
'''
|
||||
|
@ -152,7 +165,7 @@ def clean_metadata():
|
|||
|
||||
def install(pkgs, refresh=False):
|
||||
'''
|
||||
Install the passed package(s), add refresh=True to clean out the yum
|
||||
Install the passed package(s), add refresh=True to clean out the yum
|
||||
database before executing
|
||||
|
||||
Return a dict containing the new package names and versions::
|
||||
|
@ -162,19 +175,26 @@ def install(pkgs, refresh=False):
|
|||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.install <package,package,package>
|
||||
salt '*' pkg.install 'package package package'
|
||||
'''
|
||||
if refresh:
|
||||
refresh_db()
|
||||
|
||||
pkgs = pkgs.split(',')
|
||||
|
||||
if ',' in pkgs:
|
||||
pkgs = pkgs.split(',')
|
||||
else:
|
||||
pkgs = pkgs.split(' ')
|
||||
|
||||
old = list_pkgs(*pkgs)
|
||||
|
||||
|
||||
yb = yum.YumBase()
|
||||
setattr(yb.conf, 'assumeyes', True)
|
||||
|
||||
|
||||
for pkg in pkgs:
|
||||
yb.install(name=pkg)
|
||||
try:
|
||||
yb.install(name=pkg)
|
||||
except yum.Errors.InstallError:
|
||||
log.error('Package {0} failed to install'.format(pkg))
|
||||
# Resolve Deps before attempting install. This needs to be improved
|
||||
# by also tracking any deps that may get upgraded/installed during this
|
||||
# process. For now only the version of the package(s) you request be
|
||||
|
@ -184,7 +204,7 @@ def install(pkgs, refresh=False):
|
|||
yb.closeRpmDB()
|
||||
|
||||
new = list_pkgs(*pkgs)
|
||||
|
||||
|
||||
return _compare_versions(old, new)
|
||||
|
||||
|
||||
|
@ -204,17 +224,17 @@ def upgrade():
|
|||
|
||||
yb = yum.YumBase()
|
||||
setattr(yb.conf, 'assumeyes', True)
|
||||
|
||||
|
||||
old = list_pkgs()
|
||||
|
||||
|
||||
# ideally we would look in the yum transaction and get info on all the
|
||||
# packages that are going to be upgraded and only look up old/new version
|
||||
# info on those packages.
|
||||
# info on those packages.
|
||||
yb.update()
|
||||
yb.resolveDeps()
|
||||
yb.processTransaction(rpmDisplay=yum.rpmtrans.NoOutputCallBack())
|
||||
yb.closeRpmDB()
|
||||
|
||||
|
||||
new = list_pkgs()
|
||||
return _compare_versions(old, new)
|
||||
|
||||
|
@ -229,22 +249,22 @@ def remove(pkgs):
|
|||
|
||||
salt '*' pkg.remove <package,package,package>
|
||||
'''
|
||||
|
||||
|
||||
yb = yum.YumBase()
|
||||
setattr(yb.conf, 'assumeyes', True)
|
||||
pkgs = pkgs.split(',')
|
||||
old = list_pkgs(*pkgs)
|
||||
|
||||
|
||||
# same comments as in upgrade for remove.
|
||||
for pkg in pkgs:
|
||||
yb.remove(name=pkg)
|
||||
|
||||
|
||||
yb.resolveDeps()
|
||||
yb.processTransaction(rpmDisplay=yum.rpmtrans.NoOutputCallBack())
|
||||
yb.closeRpmDB()
|
||||
|
||||
|
||||
new = list_pkgs(*pkgs)
|
||||
|
||||
|
||||
return _list_removed(old, new)
|
||||
|
||||
|
||||
|
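The two yum providers now split responsibility by release: yumpkg.py claims Fedora 11+ and RHEL-family 6+ (where the yum Python API and python 2.6 are available), while yumpkg5.py below takes the older releases and shells out instead. The release gate both __virtual__ functions rely on, isolated with invented grain values:

def _major_release(osrelease):
    # '6.2' -> 6, '15' -> 15; both modules key off the major version only.
    return int(osrelease.split('.')[0])

def picks_yum_api(os_name, osrelease,
                  rhel_like=('CentOS', 'Scientific', 'RedHat')):
    # Mirrors the gating above: new enough -> yumpkg (yum API),
    # otherwise -> yumpkg5 (plain `yum` CLI).
    if os_name == 'Fedora':
        return _major_release(osrelease) >= 11
    return os_name in rhel_like and _major_release(osrelease) >= 6

print(picks_yum_api('Fedora', '15'))    # True  -> yumpkg.py
print(picks_yum_api('CentOS', '5.6'))   # False -> yumpkg5.py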
|
salt/modules/yumpkg5.py (new file, 214 lines)
|
@ -0,0 +1,214 @@
|
|||
'''
|
||||
Support for YUM
|
||||
'''
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
|
||||
from salt.exceptions import PkgParseError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Confine this module to yum based systems
|
||||
'''
|
||||
# Return this for pkg on RHEL/Fedora based distros that do not ship with
|
||||
# python 2.6 or greater.
|
||||
dists = ('CentOS', 'Scientific', 'RedHat')
|
||||
if __grains__['os'] == 'Fedora':
|
||||
if int(__grains__['osrelease'].split('.')[0]) < 11:
|
||||
return 'pkg'
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
if __grains__['os'] in dists:
|
||||
if int(__grains__['osrelease'].split('.')[0]) <= 5:
|
||||
return 'pkg'
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def _parse_yum(arg):
|
||||
'''
|
||||
A small helper to parse yum output; returns a list of namedtuples
|
||||
'''
|
||||
cmd = 'yum -q {0}'.format(arg)
|
||||
out = __salt__['cmd.run_stdout'](cmd)
|
||||
YumOut = namedtuple('YumOut', ('name', 'version', 'status'))
|
||||
|
||||
try:
|
||||
results = map(YumOut._make,
|
||||
[i.split() for i in out.split('\n') if len(i.split()) == 3])
|
||||
except TypeError as exc:
|
||||
results = ()
|
||||
msg = "Could not parse yum output for: {0}".format(cmd)
|
||||
logger.debug(msg, exc_info=exc)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def _list_removed(old, new):
|
||||
'''
|
||||
List the packages which have been removed between the two package objects
|
||||
'''
|
||||
pkgs = []
|
||||
for pkg in old:
|
||||
if pkg not in new:
|
||||
pkgs.append(pkg)
|
||||
return pkgs
|
||||
|
||||
|
||||
def available_version(name):
|
||||
'''
|
||||
The available version of the package in the repository
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.available_version <package name>
|
||||
'''
|
||||
out = _parse_yum('list {0}'.format(name))
|
||||
return out[0].version if out else ''
|
||||
|
||||
|
||||
def version(name):
|
||||
'''
|
||||
Returns a version if the package is installed, else returns an empty string
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.version <package name>
|
||||
'''
|
||||
pkgs = list_pkgs()
|
||||
if name in pkgs:
|
||||
return pkgs[name]
|
||||
else:
|
||||
return ''
|
||||
|
||||
|
||||
def list_pkgs():
|
||||
'''
|
||||
List the packages currently installed in a dict::
|
||||
|
||||
{'<package_name>': '<version>'}
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.list_pkgs
|
||||
'''
|
||||
out = _parse_yum('list installed')
|
||||
return dict([(i.name, i.version) for i in out])
|
||||
|
||||
def refresh_db():
|
||||
'''
|
||||
Since yum refreshes the database automatically, this runs a yum clean,
|
||||
so that the next yum operation will have a clean database
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.refresh_db
|
||||
'''
|
||||
cmd = 'yum clean dbcache'
|
||||
retcode = __salt__['cmd.retcode'](cmd)
|
||||
return True
|
||||
|
||||
|
||||
def install(pkg, refresh=False):
|
||||
'''
|
||||
Install the passed package, add refresh=True to clean out the yum database
|
||||
before executing
|
||||
|
||||
Return a dict containing the new package names and versions::
|
||||
|
||||
{'<package>': {'old': '<old-version>',
|
||||
'new': '<new-version>'}}
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.install <package name>
|
||||
'''
|
||||
old = list_pkgs()
|
||||
cmd = 'yum -y install ' + pkg
|
||||
if refresh:
|
||||
refresh_db()
|
||||
retcode = __salt__['cmd.retcode'](cmd)
|
||||
new = list_pkgs()
|
||||
pkgs = {}
|
||||
for npkg in new:
|
||||
if npkg in old:
|
||||
if old[npkg] == new[npkg]:
|
||||
# no change in the package
|
||||
continue
|
||||
else:
|
||||
# the package was here before and the version has changed
|
||||
pkgs[npkg] = {'old': old[npkg],
|
||||
'new': new[npkg]}
|
||||
else:
|
||||
# the package is freshly installed
|
||||
pkgs[npkg] = {'old': '',
|
||||
'new': new[npkg]}
|
||||
return pkgs
|
||||
|
||||
|
||||
def upgrade():
|
||||
'''
|
||||
Run a full system upgrade, a yum upgrade
|
||||
|
||||
Return a dict containing the new package names and versions::
|
||||
|
||||
{'<package>': {'old': '<old-version>',
|
||||
'new': '<new-version>'}}
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.upgrade
|
||||
'''
|
||||
old = list_pkgs()
|
||||
cmd = 'yum -y upgrade'
|
||||
retcode = __salt__['cmd.retcode'](cmd)
|
||||
new = list_pkgs()
|
||||
pkgs = {}
|
||||
for npkg in new:
|
||||
if npkg in old:
|
||||
if old[npkg] == new[npkg]:
|
||||
# no change in the package
|
||||
continue
|
||||
else:
|
||||
# the package was here before and the version has changed
|
||||
pkgs[npkg] = {'old': old[npkg],
|
||||
'new': new[npkg]}
|
||||
else:
|
||||
# the package is freshly installed
|
||||
pkgs[npkg] = {'old': '',
|
||||
'new': new[npkg]}
|
||||
return pkgs
|
||||
|
||||
|
||||
def remove(pkg):
|
||||
'''
|
||||
Remove a single package with yum remove
|
||||
|
||||
Return a list containing the removed packages:
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.remove <package name>
|
||||
'''
|
||||
old = list_pkgs()
|
||||
cmd = 'yum -y remove ' + pkg
|
||||
retcode = __salt__['cmd.retcode'](cmd)
|
||||
new = list_pkgs()
|
||||
return _list_removed(old, new)
|
||||
|
||||
|
||||
def purge(pkg):
|
||||
'''
|
||||
Yum does not have a purge, this function calls remove
|
||||
|
||||
Return a list containing the removed packages:
|
||||
|
||||
CLI Example::
|
||||
|
||||
salt '*' pkg.purge <package name>
|
||||
'''
|
||||
return remove(pkg)
|
||||
|
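_parse_yum() above depends on `yum -q list` printing three whitespace-separated columns per package; lines of any other shape are dropped by the length check. Parsing an invented sample of that output (not captured from a real system) into the same namedtuple shape:

from collections import namedtuple

YumOut = namedtuple('YumOut', ('name', 'version', 'status'))

# Invented sample lines in the three-column `yum -q list` shape.
sample = '''\
Installed Packages
bash.x86_64 4.1.2-15.el6 @base
openssl.x86_64 1.0.0-20.el6 updates
'''

rows = [YumOut._make(line.split()) for line in sample.split('\n')
        if len(line.split()) == 3]
print(dict((row.name, row.version) for row in rows))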
salt/msgpack/__init__.py (new file, 20 lines)
|
@ -0,0 +1,20 @@
|
|||
# coding: utf-8
|
||||
from salt.msgpack.__version__ import *
|
||||
from salt.msgpack._msgpack import *
|
||||
|
||||
# alias for compatibility to simplejson/marshal/pickle.
|
||||
load = unpack
|
||||
loads = unpackb
|
||||
|
||||
dump = pack
|
||||
dumps = packb
|
||||
|
||||
def packs(*args, **kw):
|
||||
from warnings import warn
|
||||
warn("msgpack.packs() is deprecated. Use packb() instead.", DeprecationWarning)
|
||||
return packb(*args, **kw)
|
||||
|
||||
def unpacks(*args, **kw):
|
||||
from warnings import warn
|
||||
warn("msgpack.unpacks() is deprecated. Use unpackb() instead.", DeprecationWarning)
|
||||
return unpackb(*args, **kw)
|
salt/msgpack/__version__.py (new file, 1 line)
|
@ -0,0 +1 @@
|
|||
version = (0, 1, 12)
|
salt/msgpack/_msgpack.c (new file, 6762 lines)
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff