mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
we don't need a full copy of everything in the debian package
This commit is contained in:
parent
a7dd3cb449
commit
8a5d770a1a
106 changed files with 0 additions and 11891 deletions
5
debian/salt/DEBIAN/conffiles
vendored
5
debian/salt/DEBIAN/conffiles
vendored
|
@ -1,5 +0,0 @@
|
|||
/etc/salt/master
|
||||
/etc/salt/minion
|
||||
/etc/init.d/salt-syndic
|
||||
/etc/init.d/salt-minion
|
||||
/etc/init.d/salt-master
|
23
debian/salt/DEBIAN/control
vendored
23
debian/salt/DEBIAN/control
vendored
|
@ -1,23 +0,0 @@
|
|||
Package: salt
|
||||
Version: 0.9.2-2
|
||||
Architecture: all
|
||||
Maintainer: Aaron Toponce <aaron.toponce@gmail.com>
|
||||
Installed-Size: 500
|
||||
Depends: python (>= 2.6), python-support (>= 0.90.0), python-setuptools, python-yaml, python-crypto, python-m2crypto, python-zmq, libzmq1, libzmq-dev
|
||||
Section: admin
|
||||
Priority: optional
|
||||
Homepage: http://saltstack.org
|
||||
Description: This package provides a remote manager to administer servers.
|
||||
This package is a powerful remote execution manager that can be used to
|
||||
administer servers in a fast and efficient way.
|
||||
.
|
||||
It allows commands to be executed across large groups of servers. This
|
||||
means systems can be easily managed, but data can also be easily gathered.
|
||||
Quick introspection into running systems becomes a reality.
|
||||
.
|
||||
Remote execution is usually used to set up a certain state on a remote
|
||||
system. Salt addresses this problem as well, the salt state system uses
|
||||
salt state files to define the state a server needs to be in.
|
||||
.
|
||||
Between the remote execution system, and state management Salt addresses
|
||||
the backbone of cloud and data center management.
|
95
debian/salt/DEBIAN/md5sums
vendored
95
debian/salt/DEBIAN/md5sums
vendored
|
@ -1,95 +0,0 @@
|
|||
43cfc9ca88141431455008ee3b9f9f80 usr/bin/salt
|
||||
f3dbbb6c9ec4e685a245ccc310b260cc usr/bin/salt-call
|
||||
2d7bad45a5701ca4f00e3719bd680925 usr/bin/salt-cp
|
||||
07e4b0b76362893f0686da503c80db65 usr/bin/salt-key
|
||||
56865fd4c3b9b9f3d5dd108736911ca5 usr/bin/salt-master
|
||||
e2a341ebdb3eff1a23a2a3039e95f404 usr/bin/salt-minion
|
||||
13cacc5f012b28a0ffa5b87e7daa0333 usr/bin/salt-run
|
||||
597fbb358ca1624b7b82fde2bba83a0d usr/bin/salt-syndic
|
||||
f936141dc0f34793d24d5967411e6156 usr/share/doc/salt-0.9.2/examples/templates/json-jinja.json
|
||||
a6cd4ed7de691098c1b35304e9fde8d7 usr/share/doc/salt-0.9.2/examples/templates/json-mako.json
|
||||
f09558054cbcd4009ad34ea54a9b43fa usr/share/doc/salt-0.9.2/examples/templates/json.json
|
||||
b40452dee974b718791667aba609f46c usr/share/doc/salt-0.9.2/examples/templates/yaml-jinja.yml
|
||||
e9b21b45a6d233b546871ad52cf10ded usr/share/doc/salt-0.9.2/examples/templates/yaml-mako.yml
|
||||
4bb40844863b0ce6bf663605f45120bd usr/share/doc/salt-0.9.2/examples/templates/yaml.yml
|
||||
b47275e416a6478db07a3f7b040790b7 usr/share/doc/salt/changelog.Debian.gz
|
||||
dbdcf39a2088d3a4bbcbbb99988ba679 usr/share/doc/salt/copyright
|
||||
b39ed78876aca2bc4852e31c9e11f117 usr/share/man/man1/salt-call.1.gz
|
||||
40bd8b0d2c9cba4ac8e5a05ae491645a usr/share/man/man1/salt-cp.1.gz
|
||||
61150bf8d43cdc04b92ca89ee23447ae usr/share/man/man1/salt-key.1.gz
|
||||
01aad0b9a509ed4cc774327e6792da3c usr/share/man/man1/salt-master.1.gz
|
||||
57e79240bd3c153aee600b57f1fedd81 usr/share/man/man1/salt-minion.1.gz
|
||||
ce929afa91d7b04378eccd4520e190b3 usr/share/man/man1/salt-run.1.gz
|
||||
058f1f11aaf4b35a0f6e081a2e89b9c7 usr/share/man/man1/salt-syndic.1.gz
|
||||
78f54db48c77368cb0bbac0027d0d07d usr/share/man/man1/salt.1.gz
|
||||
b302aac3dd9168f82e0b3815394f0cba usr/share/man/man7/salt.7.gz
|
||||
5ba65ab358ffdf42ff0b7e47cd9ea0a4 usr/share/pyshared/salt-0.9.2.egg-info
|
||||
393119998ccebf818297e228d5360a65 usr/share/pyshared/salt/__init__.py
|
||||
23313cc58f1c953fb035e6107d773606 usr/share/pyshared/salt/cli/__init__.py
|
||||
4052246d11a35f456a94e4ddbf435005 usr/share/pyshared/salt/cli/caller.py
|
||||
a54cfb2084e4310ce3a57344f99c4e3a usr/share/pyshared/salt/cli/cp.py
|
||||
b900acd1f60db7e1299695b45049d12c usr/share/pyshared/salt/cli/key.py
|
||||
6d1896f239f840006a351a96ef1ec548 usr/share/pyshared/salt/client.py
|
||||
dfc863a0c70ca514f3cec9154bf275c7 usr/share/pyshared/salt/config.py
|
||||
f699feb548ddc3bf661fc65426f094d3 usr/share/pyshared/salt/crypt.py
|
||||
a459b055f85a19534af7ec4d3e7217a4 usr/share/pyshared/salt/ext/__init__.py
|
||||
bd303b0094499e49e47433d165097257 usr/share/pyshared/salt/grains/core.py
|
||||
2e920d27b739594d4e28968319dcf26f usr/share/pyshared/salt/loader.py
|
||||
534eae40a2254fb884e367a9e2a9227c usr/share/pyshared/salt/log.py
|
||||
90f5935a205304c3779c03a1c4737ccf usr/share/pyshared/salt/master.py
|
||||
34f33ca7eaf1d5018424756e0e5b377b usr/share/pyshared/salt/minion.py
|
||||
50f9d25af18d8aa09d65fc42152bdf2f usr/share/pyshared/salt/modules/apache.py
|
||||
27913c20d3bb3892a046228ef4d71335 usr/share/pyshared/salt/modules/apt.py
|
||||
6442eb82d6005c8da40096e82754538e usr/share/pyshared/salt/modules/butterkvm.py
|
||||
c6d80d7e1199938b712e37c851bf0df1 usr/share/pyshared/salt/modules/cluster.py
|
||||
173b3b5c40d0b2ddec6f010c406323d1 usr/share/pyshared/salt/modules/cmd.py
|
||||
dcd32e38e17b054c3082c20fdfcfc685 usr/share/pyshared/salt/modules/cp.py
|
||||
06aa7bd0c461aee2f738d91cc1d5df12 usr/share/pyshared/salt/modules/cron.py
|
||||
b3ecb5622994e64cac2d75781c0e5ee8 usr/share/pyshared/salt/modules/cytest.pyx
|
||||
09f51900888f97e2092a7e113acfe2de usr/share/pyshared/salt/modules/disk.py
|
||||
8a634f026e6d306e4ed96ae36f7c3894 usr/share/pyshared/salt/modules/file.py
|
||||
fbb69d63698b628525581513fb8b566e usr/share/pyshared/salt/modules/grains.py
|
||||
5ee8bb847cde4ea237c1e91aef5b43ce usr/share/pyshared/salt/modules/groupadd.py
|
||||
b58e654170ae8d2029ab2cd131751228 usr/share/pyshared/salt/modules/hosts.py
|
||||
638b3586d32827e23c1cbd0e969b8468 usr/share/pyshared/salt/modules/mdadm.py
|
||||
2020fe35b9cea1b1dd02008a0f780d78 usr/share/pyshared/salt/modules/moosefs.py
|
||||
08131dff22c59a2d2608d2c3a8b027bf usr/share/pyshared/salt/modules/mount.py
|
||||
f7301df62cf5573484420c838c38582b usr/share/pyshared/salt/modules/mysql.py
|
||||
8c02d4abb9d80840dc80ebe9c8af8fcb usr/share/pyshared/salt/modules/network.py
|
||||
bec67d2b715c4f637a72ac42aa6614c4 usr/share/pyshared/salt/modules/pacman.py
|
||||
cbd501c64d1d44789dcccae644dd3473 usr/share/pyshared/salt/modules/ps.py
|
||||
964738e1fb16afa4554b8e8e9cf3ac47 usr/share/pyshared/salt/modules/publish.py
|
||||
87336911c3565daacf1f701948520696 usr/share/pyshared/salt/modules/puppet.py
|
||||
d7195f7e4589d5d71465604632d2a215 usr/share/pyshared/salt/modules/service.py
|
||||
175f33db5d32f4f5a5780a762feb9a50 usr/share/pyshared/salt/modules/shadow.py
|
||||
75a25284cbffd6b49321b3ee257a478c usr/share/pyshared/salt/modules/ssh.py
|
||||
6e018b6a42c7a77fc798fbf6b0350844 usr/share/pyshared/salt/modules/state.py
|
||||
53009c9bce706e8b3397bfe9af64460d usr/share/pyshared/salt/modules/status.py
|
||||
ee1da4be3f881ca212083ef63977e96e usr/share/pyshared/salt/modules/sysctl.py
|
||||
289094371cb1ae80b5a4035bd54f218f usr/share/pyshared/salt/modules/test.py
|
||||
15c313e51f98fc5f4c2ad7a340eba616 usr/share/pyshared/salt/modules/useradd.py
|
||||
2d20441a402013370318a9b168ac04c0 usr/share/pyshared/salt/modules/virt.py
|
||||
44dd93cbed6e9b0b506db2cbbea325f0 usr/share/pyshared/salt/modules/yum.py
|
||||
1431969331896d2078ab79c6d3f95031 usr/share/pyshared/salt/output.py
|
||||
afe98744f850b4bb58eca21e4a485ea5 usr/share/pyshared/salt/payload.py
|
||||
3908b7a0b61e474efbdabe3df485adbe usr/share/pyshared/salt/renderers/json_jinja.py
|
||||
fca2ab66faaa2059e0ea50ada6c9ce6c usr/share/pyshared/salt/renderers/json_mako.py
|
||||
f9eaae29856a7599b74a5129da4093a7 usr/share/pyshared/salt/renderers/yaml_jinja.py
|
||||
9372d6474dcd294f85984b7c4231a7c0 usr/share/pyshared/salt/renderers/yaml_mako.py
|
||||
f5a8b6d150f65d223c6d1b8317eb7920 usr/share/pyshared/salt/returners/local.py
|
||||
7a2de89aeff6bff26467637564219778 usr/share/pyshared/salt/returners/mongo_return.py
|
||||
3cadf9146a23a9e601f67188e501ba72 usr/share/pyshared/salt/returners/redis_return.py
|
||||
dd90de52321122855402f8b05ce6c770 usr/share/pyshared/salt/runner.py
|
||||
c714614ec18bd5a731499b1013bba350 usr/share/pyshared/salt/runners/manage.py
|
||||
79605a9111e947d7d9d6309e7692aaa2 usr/share/pyshared/salt/state.py
|
||||
21f42e2e34ec88df55c334bcf3dfb7cd usr/share/pyshared/salt/states/cmd.py
|
||||
40bea476ee8a707e241c10d80ac79f9e usr/share/pyshared/salt/states/file.py
|
||||
53369f8e4613b0a32934930a3308c96d usr/share/pyshared/salt/states/group.py
|
||||
1e1ae274e0a0933681778dda26cf796a usr/share/pyshared/salt/states/host.py
|
||||
a69e690d45f59a8b355454d8a006f409 usr/share/pyshared/salt/states/mount.py
|
||||
f6adc74063b34949444243c0d6a2e859 usr/share/pyshared/salt/states/pkg.py
|
||||
4e2189a43d9a2b5c342464e8954c76fc usr/share/pyshared/salt/states/service.py
|
||||
7e9d6225419c0d61106126a464a50cfb usr/share/pyshared/salt/states/user.py
|
||||
2a6dafca9baf4ef0ef19f21d2d900ecd usr/share/pyshared/salt/utils/__init__.py
|
||||
6e5c4ec3cb9c1643503d069b28174015 usr/share/pyshared/salt/utils/find.py
|
||||
47c73381cc896230adffc8746c209bc4 usr/share/python-support/salt.public
|
22
debian/salt/DEBIAN/postinst
vendored
22
debian/salt/DEBIAN/postinst
vendored
|
@ -1,22 +0,0 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
# Automatically added by dh_pysupport
|
||||
if which update-python-modules >/dev/null 2>&1; then
|
||||
update-python-modules salt.public
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ -x "/etc/init.d/salt-master" ]; then
|
||||
update-rc.d salt-master defaults >/dev/null || exit $?
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ -x "/etc/init.d/salt-minion" ]; then
|
||||
update-rc.d salt-minion defaults >/dev/null || exit $?
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ -x "/etc/init.d/salt-syndic" ]; then
|
||||
update-rc.d salt-syndic defaults >/dev/null || exit $?
|
||||
fi
|
||||
# End automatically added section
|
17
debian/salt/DEBIAN/postrm
vendored
17
debian/salt/DEBIAN/postrm
vendored
|
@ -1,17 +0,0 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
# Automatically added by dh_installinit
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d salt-syndic remove >/dev/null
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d salt-minion remove >/dev/null
|
||||
fi
|
||||
# End automatically added section
|
||||
# Automatically added by dh_installinit
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d salt-master remove >/dev/null
|
||||
fi
|
||||
# End automatically added section
|
7
debian/salt/DEBIAN/prerm
vendored
7
debian/salt/DEBIAN/prerm
vendored
|
@ -1,7 +0,0 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
# Automatically added by dh_pysupport
|
||||
if which update-python-modules >/dev/null 2>&1; then
|
||||
update-python-modules -c salt.public
|
||||
fi
|
||||
# End automatically added section
|
127
debian/salt/etc/init.d/salt-master
vendored
127
debian/salt/etc/init.d/salt-master
vendored
|
@ -1,127 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Salt master
|
||||
###################################
|
||||
|
||||
# LSB header
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: salt-master
|
||||
# Required-Start: $remote_fs $network
|
||||
# Required-Stop: $remote_fs $network
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: salt master control daemon
|
||||
# Description: This is a daemon that controls the salt minions
|
||||
### END INIT INFO
|
||||
|
||||
# chkconfig header
|
||||
|
||||
# chkconfig: 2345 99 01
|
||||
# description: This is a daemon that controls the salt mininons
|
||||
#
|
||||
# processname: /usr/bin/salt-master
|
||||
|
||||
# Sanity checks.
|
||||
[ -x /usr/bin/salt-master ] || exit 0
|
||||
|
||||
DEBIAN_VERSION=/etc/debian_version
|
||||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
. /etc/rc.d/init.d/functions
|
||||
fi
|
||||
|
||||
SERVICE=salt-master
|
||||
PROCESS=salt-master
|
||||
CONFIG_ARGS=" "
|
||||
|
||||
RETVAL=0
|
||||
|
||||
start() {
|
||||
echo -n $"Starting salt-master daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-master -d $CONFIG_ARGS
|
||||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif /usr/bin/python /usr/bin/salt-master -d; then
|
||||
echo -n "OK"
|
||||
RETVAL=0
|
||||
fi
|
||||
else
|
||||
daemon --check $SERVICE $PROCESS -d $CONFIG_ARGS
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
return $RETVAL
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping salt-master daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
killproc -TERM /usr/bin/salt-master
|
||||
rc_status -v
|
||||
elif [ -f $DEBIAN_VERSION ]; then
|
||||
# Added this since Debian's start-stop-daemon doesn't support spawned processes
|
||||
if ps -ef | grep "/usr/bin/python /usr/bin/salt-master" | grep -v grep | awk '{print $2}' | xargs kill &> /dev/null; then
|
||||
echo -n "OK"
|
||||
RETVAL=0
|
||||
else
|
||||
echo -n "Daemon is not started"
|
||||
RETVAL=1
|
||||
fi
|
||||
else
|
||||
killproc $PROCESS
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
# See how we were called.
|
||||
case "$1" in
|
||||
start|stop|restart)
|
||||
$1
|
||||
;;
|
||||
status)
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
echo -n "Checking for service salt-master "
|
||||
checkproc /usr/bin/salt-master
|
||||
rc_status -v
|
||||
elif [ -f $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
RETVAL=0
|
||||
echo "salt-master is running."
|
||||
else
|
||||
RETVAL=1
|
||||
echo "salt-master is stopped."
|
||||
fi
|
||||
else
|
||||
status $PROCESS
|
||||
RETVAL=$?
|
||||
fi
|
||||
;;
|
||||
condrestart)
|
||||
[ -f $LOCKFILE ] && restart || :
|
||||
;;
|
||||
reload|force-reload)
|
||||
echo "Can't reload configuration, you have to restart it"
|
||||
RETVAL=$?
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit $RETVAL
|
127
debian/salt/etc/init.d/salt-minion
vendored
127
debian/salt/etc/init.d/salt-minion
vendored
|
@ -1,127 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Salt minion
|
||||
###################################
|
||||
|
||||
# LSB header
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: salt-minion
|
||||
# Required-Start: $remote_fs $network
|
||||
# Required-Stop: $remote_fs $network
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: salt minion control daemon
|
||||
# Description: This is a daemon that controls the salt minions
|
||||
### END INIT INFO
|
||||
|
||||
# chkconfig header
|
||||
|
||||
# chkconfig: 2345 99 01
|
||||
# description: This is a daemon that controls the salt mininons
|
||||
#
|
||||
# processname: /usr/bin/salt-minion
|
||||
|
||||
# Sanity checks.
|
||||
[ -x /usr/bin/salt-minion ] || exit 0
|
||||
|
||||
DEBIAN_VERSION=/etc/debian_version
|
||||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
. /etc/rc.d/init.d/functions
|
||||
fi
|
||||
|
||||
SERVICE=salt-minion
|
||||
PROCESS=salt-minion
|
||||
CONFIG_ARGS=" "
|
||||
|
||||
RETVAL=0
|
||||
|
||||
start() {
|
||||
echo -n $"Starting salt-minion daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-minion -d $CONFIG_ARGS
|
||||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif /usr/bin/python /usr/bin/salt-minion -d; then
|
||||
echo -n "OK"
|
||||
RETVAL=0
|
||||
fi
|
||||
else
|
||||
daemon --check $SERVICE $PROCESS -d $CONFIG_ARGS
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
return $RETVAL
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping salt-minion daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
killproc -TERM /usr/bin/salt-minion
|
||||
rc_status -v
|
||||
elif [ -f $DEBIAN_VERSION ]; then
|
||||
# Added this since Debian's start-stop-daemon doesn't support spawned processes
|
||||
if ps -ef | grep "/usr/bin/python /usr/bin/salt-minion" | grep -v grep | awk '{print $2}' | xargs kill &> /dev/null; then
|
||||
echo -n "OK"
|
||||
RETVAL=0
|
||||
else
|
||||
echo -n "Daemon is not started"
|
||||
RETVAL=1
|
||||
fi
|
||||
else
|
||||
killproc $PROCESS
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
# See how we were called.
|
||||
case "$1" in
|
||||
start|stop|restart)
|
||||
$1
|
||||
;;
|
||||
status)
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
echo -n "Checking for service salt-minion "
|
||||
checkproc /usr/bin/salt-minion
|
||||
rc_status -v
|
||||
elif [ -f $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
RETVAL=0
|
||||
echo "salt-minion is running."
|
||||
else
|
||||
RETVAL=1
|
||||
echo "salt-minion is stopped."
|
||||
fi
|
||||
else
|
||||
status $PROCESS
|
||||
RETVAL=$?
|
||||
fi
|
||||
;;
|
||||
condrestart)
|
||||
[ -f $LOCKFILE ] && restart || :
|
||||
;;
|
||||
reload|force-reload)
|
||||
echo "Can't reload configuration, you have to restart it"
|
||||
RETVAL=$?
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|reload|force-reload}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit $RETVAL
|
124
debian/salt/etc/init.d/salt-syndic
vendored
124
debian/salt/etc/init.d/salt-syndic
vendored
|
@ -1,124 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Salt syndic
|
||||
###################################
|
||||
|
||||
# LSB header
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: salt-syndic
|
||||
# Required-Start: $remote_fs $network
|
||||
# Required-Stop: $remote_fs $network
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: salt syndic master-minion passthrough daemon
|
||||
# Description: This is a daemon that controls the salt syndic
|
||||
### END INIT INFO
|
||||
|
||||
# chkconfig header
|
||||
|
||||
# chkconfig: 2345 99 01
|
||||
# description: This is a daemon that controls the salt mininons
|
||||
#
|
||||
# processname: /usr/bin/salt-syndic
|
||||
|
||||
# Sanity checks.
|
||||
[ -x /usr/bin/salt-syndic ] || exit 0
|
||||
|
||||
DEBIAN_VERSION=/etc/debian_version
|
||||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
. /etc/rc.d/init.d/functions
|
||||
fi
|
||||
|
||||
SERVICE=salt-syndic
|
||||
PROCESS=salt-syndic
|
||||
CONFIG_ARGS=" "
|
||||
|
||||
RETVAL=0
|
||||
|
||||
start() {
|
||||
echo -n $"Starting salt-syndic daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
startproc -f -p /var/run/$SERVICE.pid /usr/bin/salt-syndic -d $CONFIG_ARGS
|
||||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif /usr/bin/python /usr/bin/salt-syndic -d; then
|
||||
echo -n "OK"
|
||||
RETVAL=0
|
||||
fi
|
||||
else
|
||||
daemon --check $SERVICE $PROCESS -d $CONFIG_ARGS
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
return $RETVAL
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n $"Stopping salt-syndic daemon: "
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
killproc -TERM /usr/bin/salt-syndic
|
||||
rc_status -v
|
||||
elif [ -f $DEBIAN_VERSION ]; then
|
||||
# Added this since Debian's start-stop-daemon doesn't support spawned processes
|
||||
if ps -ef | grep "/usr/bin/python /usr/bin/salt-syndic" | grep -v grep | awk '{print $2}' | xargs kill &> /dev/null; then
|
||||
echo -n "OK"
|
||||
RETVAL=0
|
||||
else
|
||||
echo -n "Daemon is not started"
|
||||
RETVAL=1
|
||||
fi
|
||||
else
|
||||
killproc $PROCESS
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
# See how we were called.
|
||||
case "$1" in
|
||||
start|stop|restart)
|
||||
$1
|
||||
;;
|
||||
status)
|
||||
if [ -f $SUSE_RELEASE ]; then
|
||||
echo -n "Checking for service salt-syndic "
|
||||
checkproc /usr/bin/salt-syndic
|
||||
rc_status -v
|
||||
elif [ -f $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
RETVAL=0
|
||||
echo "salt-syndic is running."
|
||||
else
|
||||
RETVAL=1
|
||||
echo "salt-syndic is stopped."
|
||||
fi
|
||||
else
|
||||
status $PROCESS
|
||||
RETVAL=$?
|
||||
fi
|
||||
;;
|
||||
reload|force-reload)
|
||||
echo "Can't reload configuration, you have to restart it"
|
||||
RETVAL=$?
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart|reload|force-reload}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit $RETVAL
|
166
debian/salt/etc/salt/master
vendored
166
debian/salt/etc/salt/master
vendored
|
@ -1,166 +0,0 @@
|
|||
##### Primary configuration settings #####
|
||||
##########################################
|
||||
# The address of the interface to bind to
|
||||
#interface: 0.0.0.0
|
||||
|
||||
# The port used by the publisher
|
||||
#publish_port: 4505
|
||||
|
||||
# The number of worker threads to start, these threads are used to manage
|
||||
# return calls made from minions to the master, if the master seems to be
|
||||
# running slowly, increase the number of threads
|
||||
#worker_threads: 5
|
||||
|
||||
# The port used by the communication interface
|
||||
#ret_port: 4506
|
||||
|
||||
# The root directory prepended to these options: pki_dir, cachedir, log_file.
|
||||
#root_dir: /
|
||||
|
||||
# Directory used to store public key data
|
||||
#pki_dir: /etc/salt/pki
|
||||
|
||||
# Directory to store job and cache data
|
||||
#cachedir: /var/cache/salt
|
||||
|
||||
# Set the number of hours to keep old job information
|
||||
#keep_jobs: 24
|
||||
|
||||
# Set the directory used to hold unix sockets
|
||||
#sock_dir: /tmp/salt-unix
|
||||
|
||||
##### Security settings #####
|
||||
##########################################
|
||||
# Enable "open mode", this mode still maintains encryption, but turns off
|
||||
# authentication, this is only intended for highly secure environments or for
|
||||
# the situation where your keys end up in a bad state. If you run in open more
|
||||
# you do so at your own risk!
|
||||
#open_mode: False
|
||||
|
||||
# Enable auto_accept, this setting will automatically accept all incoming
|
||||
# public keys from the minions
|
||||
#auto_accept: False
|
||||
|
||||
##### State System settings #####
|
||||
##########################################
|
||||
# The state system uses a "top" file to tell the minions what environment to
|
||||
# use and what modules to use. The state_top file is defined relative to the
|
||||
# root of the base environment
|
||||
#state_top: top.yml
|
||||
#
|
||||
# The renderer to use on the minions to render the state data
|
||||
#renderer: yaml_jinja
|
||||
|
||||
##### File Server settings #####
|
||||
##########################################
|
||||
# Salt runs a lightweight file server written in zeromq to deliver files to
|
||||
# minions. This file server is built into the master daemon and does not
|
||||
# require a dedicated port.
|
||||
|
||||
# The file server works on environments passed to the master, each environment
|
||||
# can have multiple root directories, the subdirectories in the multiple file
|
||||
# roots cannot match, otherwise the downloaded files will not be able to be
|
||||
# reliably ensured. A base environment is required to house the top file
|
||||
# Example:
|
||||
# file_roots:
|
||||
# base:
|
||||
# - /srv/salt/
|
||||
# dev:
|
||||
# - /srv/salt/dev/services
|
||||
# - /srv/salt/dev/states
|
||||
# prod:
|
||||
# - /srv/salt/prod/services
|
||||
# - /srv/salt/prod/states
|
||||
#
|
||||
# Default:
|
||||
#file_roots:
|
||||
# base:
|
||||
# - /srv/salt
|
||||
|
||||
# The hash_type is the hash to use when discovering the hash of a file on
|
||||
# the master server, the default is md5, but sha1, sha224, sha256, sha384
|
||||
# and sha512 are also supported.
|
||||
#hash_type: md5
|
||||
|
||||
# The buffer size in the file server can be adjusted here:
|
||||
#file_buffer_size: 1048576
|
||||
|
||||
##### Syndic settings #####
|
||||
##########################################
|
||||
# The Salt syndic is used to pass commands through a master from a higher
|
||||
# master. Using the syndic is simple, if this is a master that will have
|
||||
# syndic servers(s) below it set the "order_masters" setting to True, if this
|
||||
# is a master that will be running a syndic daemon for passthrough the
|
||||
# "syndic_master" setting needs to be set to the location of the master server
|
||||
# to recieve commands from
|
||||
#
|
||||
# Set the order_masters setting to True if this master will command lower
|
||||
# masters' syndic interfaces
|
||||
#order_masters: False
|
||||
#
|
||||
# If this master will be running a salt syndic daemon, then the syndic needs
|
||||
# to know where the master it is recieving commands from is, set it with the
|
||||
# syndic_master value
|
||||
#syndic_master: masterofmaster
|
||||
|
||||
##### Peer Publish settings #####
|
||||
##########################################
|
||||
# Salt minions can send commands to other minions, but only if the minion is
|
||||
# allowed to. By default "Peer Publication" is disabled, and when enabled it
|
||||
# is enabled for specific minions and specific commands. This allows secure
|
||||
# compartmentalization of commands based on individual minions.
|
||||
#
|
||||
# The configuration uses regular expressions to match minions and then a list
|
||||
# of regular expressions to match functions, the following will allow the
|
||||
# minion authenticated as foo.example.com to execute functions from the test
|
||||
# and pkg modules
|
||||
# peer:
|
||||
# foo.example.com:
|
||||
# - test.*
|
||||
# - pkg.*
|
||||
#
|
||||
# This will allow all minions to execute all commands:
|
||||
# peer:
|
||||
# .*:
|
||||
# - .*
|
||||
# This is not recomanded, since it would allow anyone who gets root on any
|
||||
# single minion to instantly have root on all of the minions!
|
||||
#
|
||||
|
||||
##### Cluster settings #####
|
||||
##########################################
|
||||
# Salt supports automatic clustering, salt creates a single ip address which
|
||||
# is shared among the individual salt components using ucarp. The private key
|
||||
# and all of the minion keys are maintained across the defined cluster masters
|
||||
# The failover service is automatically managed via these settings
|
||||
|
||||
# List the identifiers for the other cluster masters in this manner:
|
||||
# [saltmaster-01.foo.com,saltmaster-02.foo.com,saltmaster-03.foo.com]
|
||||
# The members of this master array must be running as salt minions to
|
||||
# facilitate the distribution of cluster information
|
||||
#cluster_masters: []
|
||||
|
||||
# The cluster modes are "paranoid" and "full"
|
||||
# paranoid will only distribute the accepted minion public keys.
|
||||
# full will also distribute the master private key.
|
||||
#cluster_mode: paranoid
|
||||
|
||||
|
||||
##### Logging settings #####
|
||||
##########################################
|
||||
# The location of the master log file
|
||||
#log_file: /var/log/salt/master
|
||||
# The level of messages to send to the log file.
|
||||
# One of 'info', 'quiet', 'critical', 'error', 'debug', 'warning'.
|
||||
# Default: 'warning'
|
||||
#log_level: warning
|
||||
#
|
||||
# Logger levels can be used to tweak specific loggers logging levels.
|
||||
# Imagine you want to have the salt library at the 'warning' level, but, you
|
||||
# still wish to have 'salt.modules' at the 'debug' level:
|
||||
# log_granular_levels: {
|
||||
# 'salt': 'warning',
|
||||
# 'salt.modules': 'debug'
|
||||
# }
|
||||
#
|
||||
#log_granular_levels: {}
|
111
debian/salt/etc/salt/minion
vendored
111
debian/salt/etc/salt/minion
vendored
|
@ -1,111 +0,0 @@
|
|||
##### Primary configuration settings #####
|
||||
##########################################
|
||||
# Set the location of the salt master server, if the master server cannot be
|
||||
# resolved, then the minion will fail to start
|
||||
#master: salt
|
||||
|
||||
# Set the post used by the master reply and authentication server
|
||||
#master_port: 4506
|
||||
|
||||
# The root directory prepended to these options: pki_dir, cachedir, log_file.
|
||||
#root_dir: /
|
||||
|
||||
# The directory to store the pki information in
|
||||
#pki_dir: /etc/salt/pki
|
||||
|
||||
# Explicitly declare the id for this minion to use, if left commented the id
|
||||
# will be the hostname as returned by the python call: socket.getfqdn()
|
||||
# Since salt uses detached ids it is possible to run multiple minions on the
|
||||
# same machine but with different ids, this can be useful for salt compute
|
||||
# clusters.
|
||||
#id:
|
||||
|
||||
# Where cache data goes
|
||||
#cachedir: /var/cache/salt
|
||||
|
||||
|
||||
##### Minion module management #####
|
||||
##########################################
|
||||
# Disable specific modules, this will allow the admin to limit the level os
|
||||
# access the master has to the minion
|
||||
#disable_modules: [cmd,test]
|
||||
#disable_returners: []
|
||||
# Modules can be loaded from arbitrary paths, this enables the easy deployment
|
||||
# of third party modules, modules for returners and minions can be loaded.
|
||||
# Specify a list of extra directories to search for minion modules and
|
||||
# returners. These paths must be fully qualified!
|
||||
#module_dirs: []
|
||||
#returner_dirs: []
|
||||
#states_dirs: []
|
||||
#render_dirs: []
|
||||
# Enable Cython modules searching and loading. (Default: False)
|
||||
#cython_enable: False
|
||||
|
||||
##### State Management Settings #####
|
||||
###########################################
|
||||
# The state management system executes all of the state templates on the minion
|
||||
# to enable more granular control of system state management. The type of
|
||||
# template and serialization used for state management needs to be configured
|
||||
# on the minion, the default renderer is yaml_jinja. This is a yaml file
|
||||
# rendered from a jinja template, the available options are:
|
||||
# yaml_jinja
|
||||
# yaml_mako
|
||||
# json_jinja
|
||||
# json_mako
|
||||
#
|
||||
#renderer: yaml_jinja
|
||||
#
|
||||
# Test allows for the state runs to only be test runs
|
||||
#test: False
|
||||
|
||||
###### Security settings #####
|
||||
###########################################
|
||||
# Enable "open mode", this mode still maintains encryption, but turns off
|
||||
# authentication, this is only intended for highly secure environments or for
|
||||
# the situation where your keys end up in a bad state. If you run in open mode
|
||||
# you do so at your own risk!
|
||||
#open_mode: False
|
||||
|
||||
|
||||
###### Thread settings #####
|
||||
###########################################
|
||||
# Disable multiprocessing support, by default when a minion receives a
|
||||
# publication a new process is spawned and the command is executed therein.
|
||||
#multiprocessing: True
|
||||
|
||||
###### Logging settings #####
|
||||
###########################################
|
||||
# The location of the minion log file
|
||||
#log_file: /var/log/salt/minion
|
||||
# The level of messages to send to the log file.
|
||||
# One of 'info', 'quiet', 'critical', 'error', 'debug', 'warning'.
|
||||
# Default: 'warning'
|
||||
#log_level: warning
|
||||
#
|
||||
# Logger levels can be used to tweak specific loggers logging levels.
|
||||
# Imagine you want to have the salt library at the 'warning' level, but, you
|
||||
# still wish to have 'salt.modules' at the 'debug' level:
|
||||
# log_granular_levels: {
|
||||
# 'salt': 'warning',
|
||||
# 'salt.modules': 'debug'
|
||||
# }
|
||||
#
|
||||
#log_granular_levels: {}
|
||||
|
||||
|
||||
###### Module configuration #####
|
||||
###########################################
|
||||
# Salt allows for modules to be passed arbitrary configuration data, any data
|
||||
# passed here in valid yaml format will be passed on to the salt minion modules
|
||||
# for use. It is STRONGLY recommended that a naming convention be used in which
|
||||
# the module name is followed by a . and then the value. Also, all top level
|
||||
# data must be allied via the yaml dict construct, some examples:
|
||||
#
|
||||
# A simple value for the test module:
|
||||
#test.foo: foo
|
||||
#
|
||||
# A list for the test module:
|
||||
#test.bar: [baz,quo]
|
||||
#
|
||||
# A dict for the test module:
|
||||
#test.baz: {spam: sausage, cheese: bread}
|
16
debian/salt/usr/bin/salt
vendored
16
debian/salt/usr/bin/salt
vendored
|
@ -1,16 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
Publish commands to the salt system from the command line on the master.
|
||||
'''
|
||||
|
||||
import salt.cli
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
client = salt.cli.SaltCMD()
|
||||
client.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
17
debian/salt/usr/bin/salt-call
vendored
17
debian/salt/usr/bin/salt-call
vendored
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
Directly call a salt command in the modules, does not require a running salt
|
||||
minion to run.
|
||||
'''
|
||||
|
||||
import salt.cli
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
client = salt.cli.SaltCall()
|
||||
client.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
16
debian/salt/usr/bin/salt-cp
vendored
16
debian/salt/usr/bin/salt-cp
vendored
|
@ -1,16 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
Publish commands to the salt system from the command line on the master.
|
||||
'''
|
||||
|
||||
import salt.cli
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
cp_ = salt.cli.SaltCP()
|
||||
cp_.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
15
debian/salt/usr/bin/salt-key
vendored
15
debian/salt/usr/bin/salt-key
vendored
|
@ -1,15 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
Manage the authentication keys with salt-key
|
||||
'''
|
||||
import salt.cli
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
saltkey = salt.cli.SaltKey()
|
||||
saltkey.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
15
debian/salt/usr/bin/salt-master
vendored
15
debian/salt/usr/bin/salt-master
vendored
|
@ -1,15 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
Start the salt-master
|
||||
'''
|
||||
import salt
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
master = salt.Master()
|
||||
master.start()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
20
debian/salt/usr/bin/salt-minion
vendored
20
debian/salt/usr/bin/salt-minion
vendored
|
@ -1,20 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
This script is used to kick off a salt minion daemon
|
||||
'''
|
||||
import salt
|
||||
import os
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
pid = os.getpid()
|
||||
try:
|
||||
minion = salt.Minion()
|
||||
minion.start()
|
||||
except KeyboardInterrupt:
|
||||
os.kill(pid, 15)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
16
debian/salt/usr/bin/salt-run
vendored
16
debian/salt/usr/bin/salt-run
vendored
|
@ -1,16 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
Execute a salt convenience routine
|
||||
'''
|
||||
|
||||
import salt.cli
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
client = salt.cli.SaltRun()
|
||||
client.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
20
debian/salt/usr/bin/salt-syndic
vendored
20
debian/salt/usr/bin/salt-syndic
vendored
|
@ -1,20 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
'''
|
||||
This script is used to kick off a salt syndic daemon
|
||||
'''
|
||||
import salt
|
||||
import os
|
||||
|
||||
def main():
|
||||
'''
|
||||
The main function
|
||||
'''
|
||||
pid = os.getpid()
|
||||
try:
|
||||
syndic = salt.Syndic()
|
||||
syndic.start()
|
||||
except KeyboardInterrupt:
|
||||
os.kill(pid, 15)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,11 +0,0 @@
|
|||
{
|
||||
{% for item in 'vim','emacs','nano','butter' %}
|
||||
"{{ item }}": { "pkg": ["installed"] },
|
||||
{% endfor %}
|
||||
"salt": {
|
||||
"pkg": ["installed"],
|
||||
"service": ["running", "enabled", {"names": ["salt-master", "salt-minion"]}]},
|
||||
"cron": {
|
||||
"pkg": ["installed"],
|
||||
"service": ["running", "enabled", {"name": "crond"}]}
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
{
|
||||
% for item in 'vim', 'emacs', 'nano', 'butter':
|
||||
"${item}": { "pkg": ["installed"] },
|
||||
% endfor
|
||||
"salt": {
|
||||
"pkg": ["installed"],
|
||||
"service": ["running", "enabled", {"names": ["salt-master", "salt-minion"]}]},
|
||||
"cron": {
|
||||
"pkg": ["installed"],
|
||||
"service": ["running", "enabled", {"name": "crond"}]}
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
{
|
||||
"vim": { "pkg": ["installed"] },
|
||||
"emacs": { "pkg": ["installed"] },
|
||||
"nano": { "pkg": ["installed"] },
|
||||
"butter": { "pkg": ["installed"] },
|
||||
"salt": {
|
||||
"pkg": ["installed"],
|
||||
"service": ["running", "enabled", {"names": ["salt-master", "salt-minion"]}]},
|
||||
"cron": {
|
||||
"pkg": ["installed"],
|
||||
"service": ["running", "enabled", {"name": "crond"}]}
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
{% for item in 'vim','emacs','nano' %}
|
||||
{{ item }}:
|
||||
pkg:
|
||||
- installed
|
||||
{% endfor %}
|
||||
salt:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- require:
|
||||
- pkg: salt
|
||||
- names:
|
||||
- salt-master
|
||||
- salt-minion
|
||||
cronie:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- enabled
|
||||
- require:
|
||||
- pkg: cronie
|
||||
- name: crond
|
|
@ -1,23 +0,0 @@
|
|||
% for item in 'vim', 'emacs', 'nano':
|
||||
${item}:
|
||||
pkg:
|
||||
- installed
|
||||
% endfor
|
||||
salt:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- require:
|
||||
- pkg: salt
|
||||
- names:
|
||||
- salt-master
|
||||
- salt-minion
|
||||
cronie:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- require:
|
||||
- pkg: cronie
|
||||
- name: crond
|
|
@ -1,27 +0,0 @@
|
|||
vim:
|
||||
pkg:
|
||||
- installed
|
||||
emacs:
|
||||
pkg:
|
||||
- installed
|
||||
nano:
|
||||
pkg:
|
||||
- latest
|
||||
salt:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- require:
|
||||
- pkg: vim
|
||||
- pkg: salt
|
||||
- service: cron
|
||||
- names:
|
||||
- salt-master
|
||||
- salt-minion
|
||||
cron:
|
||||
pkg:
|
||||
- installed
|
||||
service:
|
||||
- running
|
||||
- name: cronie
|
BIN
debian/salt/usr/share/doc/salt/changelog.Debian.gz
vendored
BIN
debian/salt/usr/share/doc/salt/changelog.Debian.gz
vendored
Binary file not shown.
42
debian/salt/usr/share/doc/salt/copyright
vendored
42
debian/salt/usr/share/doc/salt/copyright
vendored
|
@ -1,42 +0,0 @@
|
|||
Format: http://dep.debian.net/deps/dep5
|
||||
Upstream-Name: salt
|
||||
Upstream-Contact: Aaron Toponce <aaron.toponce@gmail.com>
|
||||
Source: http://github.com/thatch45/salt/
|
||||
|
||||
Files: *
|
||||
Copyright: 2011 Thomas S Hatch <thatch45@gmail.com>
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache Licens, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
Files: debian/*
|
||||
Copyright: 2011 Aaron Toponce <aaron.toponce@gmail.com>
|
||||
License: GPL-3+
|
||||
This package is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
.
|
||||
This package is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
.
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>
|
||||
.
|
||||
On Debian systems, the complete text of the GNU General
|
||||
Public License version 3 can be found in "/usr/share/common-licenses/GPL-3".
|
BIN
debian/salt/usr/share/man/man1/salt-call.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-call.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt-cp.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-cp.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt-key.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-key.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt-master.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-master.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt-minion.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-minion.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt-run.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-run.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt-syndic.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt-syndic.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man1/salt.1.gz
vendored
BIN
debian/salt/usr/share/man/man1/salt.1.gz
vendored
Binary file not shown.
BIN
debian/salt/usr/share/man/man7/salt.7.gz
vendored
BIN
debian/salt/usr/share/man/man7/salt.7.gz
vendored
Binary file not shown.
|
@ -1,22 +0,0 @@
|
|||
Metadata-Version: 1.0
|
||||
Name: salt
|
||||
Version: 0.9.2
|
||||
Summary: Portable, distrubuted, remote execution and configuration management system
|
||||
Home-page: https://github.com/thatch45/salt
|
||||
Author: Thomas S Hatch
|
||||
Author-email: thatch45@gmail.com
|
||||
License: UNKNOWN
|
||||
Description: UNKNOWN
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Cython
|
||||
Classifier: Programming Language :: Python :: 2.5
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Console
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Intended Audience :: Information Technology
|
||||
Classifier: Intended Audience :: System Administrators
|
||||
Classifier: License :: OSI Approved :: Apache Software License
|
||||
Classifier: Operating System :: POSIX :: Linux
|
||||
Classifier: Topic :: System :: Clustering
|
||||
Classifier: Topic :: System :: Distributed Computing
|
255
debian/salt/usr/share/pyshared/salt/__init__.py
vendored
255
debian/salt/usr/share/pyshared/salt/__init__.py
vendored
|
@ -1,255 +0,0 @@
|
|||
'''
|
||||
Make me some salt!
|
||||
'''
|
||||
# Import python libs
|
||||
import optparse
|
||||
import os
|
||||
import sys
|
||||
# Import salt libs
|
||||
import salt.config
|
||||
|
||||
|
||||
def verify_env(dirs):
|
||||
'''
|
||||
Verify that the named directories are in place and that the environment
|
||||
can shake the salt
|
||||
'''
|
||||
for dir_ in dirs:
|
||||
if not os.path.isdir(dir_):
|
||||
try:
|
||||
os.makedirs(dir_)
|
||||
except OSError, e:
|
||||
print 'Failed to create directory path "%s" - %s' % (dir_, e)
|
||||
|
||||
class Master(object):
|
||||
'''
|
||||
Creates a master server
|
||||
'''
|
||||
def __init__(self):
|
||||
self.cli = self.__parse_cli()
|
||||
self.opts = salt.config.master_config(self.cli['config'])
|
||||
|
||||
def __parse_cli(self):
|
||||
'''
|
||||
Parse the cli for options passed to a master daemon
|
||||
'''
|
||||
import salt.log
|
||||
parser = optparse.OptionParser()
|
||||
parser.add_option('-d',
|
||||
'--daemon',
|
||||
dest='daemon',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Run the master in a daemon')
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
dest='config',
|
||||
default='/etc/salt/master',
|
||||
help='Pass in an alternative configuration file')
|
||||
parser.add_option('-l',
|
||||
'--log-level',
|
||||
dest='log_level',
|
||||
default='warning',
|
||||
choices=salt.log.LOG_LEVELS.keys(),
|
||||
help='Console log level. One of %s. For the logfile settings '
|
||||
'see the config file. Default: \'%%default\'.' %
|
||||
', '.join([repr(l) for l in salt.log.LOG_LEVELS.keys()])
|
||||
)
|
||||
|
||||
options, args = parser.parse_args()
|
||||
salt.log.setup_console_logger(options.log_level)
|
||||
|
||||
cli = {'daemon': options.daemon,
|
||||
'config': options.config}
|
||||
|
||||
return cli
|
||||
|
||||
def start(self):
|
||||
'''
|
||||
Run the sequence to start a salt master server
|
||||
'''
|
||||
verify_env([os.path.join(self.opts['pki_dir'], 'minions'),
|
||||
os.path.join(self.opts['pki_dir'], 'minions_pre'),
|
||||
os.path.join(self.opts['cachedir'], 'jobs'),
|
||||
os.path.dirname(self.opts['log_file']),
|
||||
self.opts['sock_dir'],
|
||||
])
|
||||
import salt.log
|
||||
salt.log.setup_logfile_logger(
|
||||
self.opts['log_file'], self.opts['log_level']
|
||||
)
|
||||
for name, level in self.opts['log_granular_levels'].iteritems():
|
||||
salt.log.set_logger_level(name, level)
|
||||
import logging
|
||||
# Late import so logging works correctly
|
||||
import salt.master
|
||||
master = salt.master.Master(self.opts)
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
master.start()
|
||||
|
||||
|
||||
class Minion(object):
|
||||
'''
|
||||
Create a minion server
|
||||
'''
|
||||
def __init__(self):
|
||||
self.cli = self.__parse_cli()
|
||||
self.opts = salt.config.minion_config(self.cli['config'])
|
||||
|
||||
def __parse_cli(self):
|
||||
'''
|
||||
Parse the cli input
|
||||
'''
|
||||
import salt.log
|
||||
parser = optparse.OptionParser()
|
||||
parser.add_option('-d',
|
||||
'--daemon',
|
||||
dest='daemon',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Run the minion as a daemon')
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
dest='config',
|
||||
default='/etc/salt/minion',
|
||||
help='Pass in an alternative configuration file')
|
||||
parser.add_option('-l',
|
||||
'--log-level',
|
||||
dest='log_level',
|
||||
default='warning',
|
||||
choices=salt.log.LOG_LEVELS.keys(),
|
||||
help='Console log level. One of %s. For the logfile settings '
|
||||
'see the config file. Default: \'%%default\'.' %
|
||||
', '.join([repr(l) for l in salt.log.LOG_LEVELS.keys()]))
|
||||
|
||||
options, args = parser.parse_args()
|
||||
salt.log.setup_console_logger(options.log_level)
|
||||
cli = {'daemon': options.daemon,
|
||||
'config': options.config}
|
||||
|
||||
return cli
|
||||
|
||||
def start(self):
|
||||
'''
|
||||
Execute this method to start up a minion.
|
||||
'''
|
||||
verify_env([self.opts['pki_dir'], self.opts['cachedir'],
|
||||
os.path.dirname(self.opts['log_file']),
|
||||
])
|
||||
import salt.log
|
||||
salt.log.setup_logfile_logger(
|
||||
self.opts['log_file'], self.opts['log_level']
|
||||
)
|
||||
for name, level in self.opts['log_granular_levels'].iteritems():
|
||||
salt.log.set_logger_level(name, level)
|
||||
|
||||
import logging
|
||||
|
||||
# Late import so logging works correctly
|
||||
import salt.minion
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
minion = salt.minion.Minion(self.opts)
|
||||
minion.tune_in()
|
||||
|
||||
|
||||
class Syndic(object):
|
||||
'''
|
||||
Create a syndic server
|
||||
'''
|
||||
def __init__(self):
|
||||
self.cli = self.__parse_cli()
|
||||
self.opts = self.__prep_opts()
|
||||
|
||||
def __prep_opts(self):
|
||||
'''
|
||||
Generate the opts used by the syndic
|
||||
'''
|
||||
opts = salt.config.master_config(self.cli['master_config'])
|
||||
opts['_minion_conf_file'] = opts['conf_file']
|
||||
opts.update(salt.config.minion_config(self.cli['minion_config']))
|
||||
if opts.has_key('syndic_master'):
|
||||
# Some of the opts need to be changed to match the needed opts
|
||||
# in the minion class.
|
||||
opts['master'] = opts['syndic_master']
|
||||
opts['master_ip'] = salt.config.dns_check(opts['master'])
|
||||
|
||||
opts['master_uri'] = 'tcp://' + opts['master_ip'] + ':'\
|
||||
+ str(opts['master_port'])
|
||||
opts['_master_conf_file'] = opts['conf_file']
|
||||
opts.pop('conf_file')
|
||||
return opts
|
||||
err = 'The syndic_master needs to be configured in the salt master'\
|
||||
+ ' config, EXITING!\n'
|
||||
sys.stderr.write(err)
|
||||
sys.exit(2)
|
||||
|
||||
def __parse_cli(self):
|
||||
'''
|
||||
Parse the cli for options passed to a master daemon
|
||||
'''
|
||||
import salt.log
|
||||
parser = optparse.OptionParser()
|
||||
parser.add_option('-d',
|
||||
'--daemon',
|
||||
dest='daemon',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Run the master in a daemon')
|
||||
parser.add_option('--master-config',
|
||||
dest='master_config',
|
||||
default='/etc/salt/master',
|
||||
help='Pass in an alternative master configuration file')
|
||||
parser.add_option('--minion-config',
|
||||
dest='minion_config',
|
||||
default='/etc/salt/minion',
|
||||
help='Pass in an alternative minion configuration file')
|
||||
parser.add_option('-l',
|
||||
'--log-level',
|
||||
dest='log_level',
|
||||
default='warning',
|
||||
choices=salt.log.LOG_LEVELS.keys(),
|
||||
help='Console log level. One of %s. For the logfile settings '
|
||||
'see the config file. Default: \'%%default\'.' %
|
||||
', '.join([repr(l) for l in salt.log.LOG_LEVELS.keys()])
|
||||
)
|
||||
|
||||
options, args = parser.parse_args()
|
||||
salt.log.setup_console_logger(options.log_level)
|
||||
|
||||
cli = {'daemon': options.daemon,
|
||||
'minion_config': options.minion_config,
|
||||
'master_config': options.master_config,
|
||||
}
|
||||
|
||||
return cli
|
||||
|
||||
def start(self):
|
||||
'''
|
||||
Execute this method to start up a syndic.
|
||||
'''
|
||||
verify_env([self.opts['pki_dir'], self.opts['cachedir'],
|
||||
os.path.dirname(self.opts['log_file']),
|
||||
])
|
||||
import salt.log
|
||||
salt.log.setup_logfile_logger(
|
||||
self.opts['log_file'], self.opts['log_level']
|
||||
)
|
||||
for name, level in self.opts['log_granular_levels'].iteritems():
|
||||
salt.log.set_logger_level(name, level)
|
||||
|
||||
import logging
|
||||
|
||||
# Late import so logging works correctly
|
||||
import salt.minion
|
||||
syndic = salt.minion.Syndic(self.opts)
|
||||
if self.cli['daemon']:
|
||||
# Late import so logging works correctly
|
||||
import salt.utils
|
||||
salt.utils.daemonize()
|
||||
syndic.tune_in()
|
611
debian/salt/usr/share/pyshared/salt/cli/__init__.py
vendored
611
debian/salt/usr/share/pyshared/salt/cli/__init__.py
vendored
|
@ -1,611 +0,0 @@
|
|||
'''
|
||||
The management of salt command line utilities are stored in here
|
||||
'''
|
||||
# Import python libs
|
||||
import optparse
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
JSON = False
|
||||
try:
|
||||
import json
|
||||
JSON = True
|
||||
except:
|
||||
pass
|
||||
|
||||
# Import salt components
|
||||
import salt.client
|
||||
import salt.runner
|
||||
import salt.cli.key
|
||||
import salt.cli.cp
|
||||
import salt.cli.caller
|
||||
import salt.output
|
||||
|
||||
class SaltCMD(object):
|
||||
'''
|
||||
The execution of a salt command happens here
|
||||
'''
|
||||
def __init__(self):
|
||||
'''
|
||||
Create a SaltCMD object
|
||||
'''
|
||||
self.opts = self.__parse()
|
||||
|
||||
def __parse(self):
|
||||
'''
|
||||
Parse the command line
|
||||
'''
|
||||
parser = optparse.OptionParser()
|
||||
|
||||
parser.add_option('-t',
|
||||
'--timeout',
|
||||
default=5,
|
||||
type=int,
|
||||
dest='timeout',
|
||||
help='Set the return timeout for batch jobs; default=5 seconds')
|
||||
parser.add_option('-E',
|
||||
'--pcre',
|
||||
default=False,
|
||||
dest='pcre',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs to evaluate the target'\
|
||||
+ ' servers, use pcre regular expressions')
|
||||
parser.add_option('-L',
|
||||
'--list',
|
||||
default=False,
|
||||
dest='list_',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs to evaluate the target'\
|
||||
+ ' servers, take a comma delimited list of servers.')
|
||||
parser.add_option('-G',
|
||||
'--grain',
|
||||
default=False,
|
||||
dest='grain',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs to evaluate the target'\
|
||||
+ ' use a grain value to identify targets, the syntax'\
|
||||
+ ' for the target is the grain key followed by a pcre'\
|
||||
+ ' regular expression:\n"os:Arch.*"')
|
||||
parser.add_option('-X',
|
||||
'--exsel',
|
||||
default=False,
|
||||
dest='exsel',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs use the return code'\
|
||||
+ ' of a function.')
|
||||
parser.add_option('--return',
|
||||
default='',
|
||||
dest='return_',
|
||||
help='Set an alternative return method. By default salt will'\
|
||||
+ ' send the return data from the command back to the'\
|
||||
+ ' master, but the return data can be redirected into'\
|
||||
+ ' any number of systems, databases or applications.')
|
||||
parser.add_option('-Q',
|
||||
'--query',
|
||||
dest='query',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Execute a salt command query, this can be used to find'\
|
||||
+ ' the results os a previous function call: -Q test.echo')
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
default='/etc/salt/master',
|
||||
dest='conf_file',
|
||||
help='The location of the salt master configuration file,'\
|
||||
+ ' the salt master settings are required to know where'\
|
||||
+ ' the connections are; default=/etc/salt/master')
|
||||
parser.add_option('--raw-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='raw_out',
|
||||
help='Print the output from the salt command in raw python'\
|
||||
+ ' form, this is suitable for re-reading the output into'\
|
||||
+ ' an executing python script with eval.')
|
||||
parser.add_option('--text-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='txt_out',
|
||||
help='Print the output from the salt command in the same form '\
|
||||
+ 'the shell would.')
|
||||
parser.add_option('--yaml-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='yaml_out',
|
||||
help='Print the output from the salt command in yaml.')
|
||||
if JSON:
|
||||
parser.add_option('--json-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='json_out',
|
||||
help='Print the output from the salt command in json.')
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
opts = {}
|
||||
|
||||
opts['timeout'] = options.timeout
|
||||
opts['pcre'] = options.pcre
|
||||
opts['list'] = options.list_
|
||||
opts['grain'] = options.grain
|
||||
opts['exsel'] = options.exsel
|
||||
opts['return'] = options.return_
|
||||
opts['conf_file'] = options.conf_file
|
||||
opts['raw_out'] = options.raw_out
|
||||
opts['txt_out'] = options.txt_out
|
||||
opts['yaml_out'] = options.yaml_out
|
||||
if JSON:
|
||||
opts['json_out'] = options.json_out
|
||||
else:
|
||||
opts['json_out'] = False
|
||||
|
||||
if opts['return']:
|
||||
if opts['timeout'] == 5:
|
||||
opts['timeout'] = 0
|
||||
|
||||
if options.query:
|
||||
opts['query'] = options.query
|
||||
if len(args) < 1:
|
||||
err = 'Please pass in a command to query the old salt calls'\
|
||||
+ ' for.'
|
||||
sys.stderr.write(err, + '\n')
|
||||
sys.exit('2')
|
||||
opts['cmd'] = args[0]
|
||||
else:
|
||||
# Catch invalid invocations of salt such as: salt run
|
||||
if len(args) <= 1:
|
||||
parser.print_help()
|
||||
parser.exit()
|
||||
|
||||
if opts['list']:
|
||||
opts['tgt'] = args[0].split(',')
|
||||
else:
|
||||
opts['tgt'] = args[0]
|
||||
|
||||
if args[1].count(','):
|
||||
opts['fun'] = args[1].split(',')
|
||||
opts['arg'] = []
|
||||
for comp in ' '.join(args[2:]).split(','):
|
||||
opts['arg'].append(comp.split())
|
||||
if len(opts['fun']) != len(opts['arg']):
|
||||
err = 'Cannot execute compound command without defining'\
|
||||
+ ' all arguments.'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
else:
|
||||
opts['fun'] = args[1]
|
||||
opts['arg'] = args[2:]
|
||||
|
||||
return opts
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Execute the salt command line
|
||||
'''
|
||||
local = salt.client.LocalClient(self.opts['conf_file'])
|
||||
if self.opts.has_key('query'):
|
||||
print local.find_cmd(self.opts['cmd'])
|
||||
else:
|
||||
args = [self.opts['tgt'],
|
||||
self.opts['fun'],
|
||||
self.opts['arg'],
|
||||
self.opts['timeout'],
|
||||
]
|
||||
if self.opts['pcre']:
|
||||
args.append('pcre')
|
||||
elif self.opts['list']:
|
||||
args.append('list')
|
||||
elif self.opts['grain']:
|
||||
args.append('grain')
|
||||
elif self.opts['exsel']:
|
||||
args.append('exsel')
|
||||
else:
|
||||
args.append('glob')
|
||||
|
||||
if self.opts['return']:
|
||||
args.append(self.opts['return'])
|
||||
full_ret = local.cmd_full_return(*args)
|
||||
ret, out = self._format_ret(full_ret)
|
||||
|
||||
# Handle special case commands
|
||||
if self.opts['fun'] == 'sys.doc':
|
||||
self._print_docs(ret)
|
||||
else:
|
||||
if isinstance(ret, list) or isinstance(ret, dict):
|
||||
# Determine the proper output method and run it
|
||||
get_outputter = salt.output.get_outputter
|
||||
if self.opts['raw_out']:
|
||||
printout = get_outputter('raw')
|
||||
elif self.opts['json_out']:
|
||||
printout = get_outputter('json')
|
||||
elif self.opts['txt_out']:
|
||||
printout = get_outputter('txt')
|
||||
elif self.opts['yaml_out']:
|
||||
printout = get_outputter('yaml')
|
||||
elif out:
|
||||
printout = get_outputter(out)
|
||||
else:
|
||||
printout = get_outputter(None)
|
||||
|
||||
printout(ret)
|
||||
|
||||
def _format_ret(self, full_ret):
|
||||
'''
|
||||
Take the full return data and format it to simple output
|
||||
'''
|
||||
ret = {}
|
||||
out = ''
|
||||
for key, data in full_ret.items():
|
||||
ret[key] = data['ret']
|
||||
if data.has_key('out'):
|
||||
out = data['out']
|
||||
return ret, out
|
||||
|
||||
def _print_docs(self, ret):
|
||||
'''
|
||||
Print out the docstrings for all of the functions on the minions
|
||||
'''
|
||||
docs = {}
|
||||
if not ret:
|
||||
sys.stderr.write('No minions found to gather docs from\n')
|
||||
for host in ret:
|
||||
for fun in ret[host]:
|
||||
if not docs.has_key(fun):
|
||||
if ret[host][fun]:
|
||||
docs[fun] = ret[host][fun]
|
||||
for fun in sorted(docs):
|
||||
print fun + ':'
|
||||
print docs[fun]
|
||||
print ''
|
||||
|
||||
|
||||
class SaltCP(object):
|
||||
'''
|
||||
Run the salt-cp command line client
|
||||
'''
|
||||
def __init__(self):
|
||||
self.opts = self.__parse()
|
||||
|
||||
def __parse(self):
|
||||
'''
|
||||
Parse the command line
|
||||
'''
|
||||
parser = optparse.OptionParser()
|
||||
|
||||
parser.add_option('-t',
|
||||
'--timeout',
|
||||
default=5,
|
||||
type=int,
|
||||
dest='timeout',
|
||||
help='Set the return timeout for batch jobs; default=5 seconds')
|
||||
parser.add_option('-E',
|
||||
'--pcre',
|
||||
default=False,
|
||||
dest='pcre',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs to evaluate the target'\
|
||||
+ ' servers, use pcre regular expressions')
|
||||
parser.add_option('-L',
|
||||
'--list',
|
||||
default=False,
|
||||
dest='list_',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs to evaluate the target'\
|
||||
+ ' servers, take a comma delimited list of servers.')
|
||||
parser.add_option('-G',
|
||||
'--grain',
|
||||
default=False,
|
||||
dest='grain',
|
||||
action='store_true',
|
||||
help='Instead of using shell globs to evaluate the target'\
|
||||
+ ' use a grain value to identify targets, the syntax'\
|
||||
+ ' for the target is the grains key followed by a pcre'\
|
||||
+ ' regular expression:\n"os:Arch.*"')
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
default='/etc/salt/master',
|
||||
dest='conf_file',
|
||||
help='The location of the salt master configuration file,'\
|
||||
+ ' the salt master settings are required to know where'\
|
||||
+ ' the connections are; default=/etc/salt/master')
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
opts = {}
|
||||
|
||||
opts['timeout'] = options.timeout
|
||||
opts['pcre'] = options.pcre
|
||||
opts['list'] = options.list_
|
||||
opts['grain'] = options.grain
|
||||
opts['conf_file'] = options.conf_file
|
||||
|
||||
if opts['list']:
|
||||
opts['tgt'] = args[0].split(',')
|
||||
else:
|
||||
opts['tgt'] = args[0]
|
||||
opts['src'] = args[1:-1]
|
||||
opts['dest'] = args[-1]
|
||||
|
||||
return opts
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Execute salt-cp
|
||||
'''
|
||||
cp_ = salt.cli.cp.SaltCP(self.opts)
|
||||
cp_.run()
|
||||
|
||||
|
||||
class SaltKey(object):
|
||||
'''
|
||||
Initialize the Salt key manager
|
||||
'''
|
||||
def __init__(self):
|
||||
self.opts = self.__parse()
|
||||
|
||||
def __parse(self):
|
||||
'''
|
||||
Parse the command line options for the salt key
|
||||
'''
|
||||
parser = optparse.OptionParser()
|
||||
|
||||
parser.add_option('-l',
|
||||
'--list',
|
||||
dest='list_',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='List the unaccepted public keys')
|
||||
|
||||
parser.add_option('-L',
|
||||
'--list-all',
|
||||
dest='list_all',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='List all public keys')
|
||||
|
||||
parser.add_option('-a',
|
||||
'--accept',
|
||||
dest='accept',
|
||||
default='',
|
||||
help='Accept the following key')
|
||||
|
||||
parser.add_option('-A',
|
||||
'--accept-all',
|
||||
dest='accept_all',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Accept all pending keys')
|
||||
|
||||
parser.add_option('-p',
|
||||
'--print',
|
||||
dest='print_',
|
||||
default='',
|
||||
help='Print the specified public key')
|
||||
|
||||
parser.add_option('-P',
|
||||
'--print-all',
|
||||
dest='print_all',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Print all public keys')
|
||||
|
||||
parser.add_option('-d',
|
||||
'--delete',
|
||||
dest='delete',
|
||||
default='',
|
||||
help='Delete the named key')
|
||||
|
||||
parser.add_option('--gen-keys',
|
||||
dest='gen_keys',
|
||||
default='',
|
||||
help='Set a name to generate a keypair for use with salt')
|
||||
|
||||
parser.add_option('--gen-keys-dir',
|
||||
dest='gen_keys_dir',
|
||||
default='.',
|
||||
help='Set the direcotry to save the generated keypair, only'
|
||||
'works with "gen_keys_dir" option; default=.')
|
||||
|
||||
parser.add_option('--keysize',
|
||||
dest='keysize',
|
||||
default=256,
|
||||
type=int,
|
||||
help='Set the keysize for the generated key, only works with'
|
||||
'the "--gen-keys" option; default=256')
|
||||
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
dest='config',
|
||||
default='/etc/salt/master',
|
||||
help='Pass in an alternative configuration file')
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
opts = {}
|
||||
|
||||
opts['list'] = options.list_
|
||||
opts['list_all'] = options.list_all
|
||||
opts['accept'] = options.accept
|
||||
opts['accept_all'] = options.accept_all
|
||||
opts['print'] = options.print_
|
||||
opts['print_all'] = options.print_all
|
||||
opts['delete'] = options.delete
|
||||
opts['gen_keys'] = options.gen_keys
|
||||
opts['gen_keys_dir'] = options.gen_keys_dir
|
||||
opts['keysize'] = options.keysize
|
||||
|
||||
opts.update(salt.config.master_config(options.config))
|
||||
|
||||
return opts
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Execute saltkey
|
||||
'''
|
||||
key = salt.cli.key.Key(self.opts)
|
||||
key.run()
|
||||
|
||||
class SaltCall(object):
|
||||
'''
|
||||
Used to locally execute a salt command
|
||||
'''
|
||||
def __init__(self):
|
||||
self.opts = self.__parse()
|
||||
|
||||
def __parse(self):
|
||||
'''
|
||||
Parse the command line arguments
|
||||
'''
|
||||
parser = optparse.OptionParser()
|
||||
|
||||
parser.add_option('-g',
|
||||
'--grains',
|
||||
dest='grains',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Return the information generated by the salt grains')
|
||||
parser.add_option('-m',
|
||||
'--module-dirs',
|
||||
dest='module_dirs',
|
||||
default='',
|
||||
help='Specify an additional directories to pull modules from,'\
|
||||
+ ' multiple directories can be delimited by commas')
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
dest='config',
|
||||
default='/etc/salt/minion',
|
||||
help='Pass in an alternative configuration file')
|
||||
parser.add_option('-d',
|
||||
'--doc',
|
||||
dest='doc',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Return the documentation for the specified module of'\
|
||||
+ ' for all modules if none are specified')
|
||||
parser.add_option('-l',
|
||||
'--log-level',
|
||||
default='info',
|
||||
dest='log_level',
|
||||
help='Set the output level for salt-call')
|
||||
parser.add_option('--raw-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='raw_out',
|
||||
help='Print the output from the salt command in raw python'\
|
||||
+ ' form, this is suitable for re-reading the output into'\
|
||||
+ ' an executing python script with eval.')
|
||||
parser.add_option('--text-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='txt_out',
|
||||
help='Print the output from the salt command in the same form '\
|
||||
+ 'the shell would.')
|
||||
parser.add_option('--yaml-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='yaml_out',
|
||||
help='Print the output from the salt command in yaml.')
|
||||
if JSON:
|
||||
parser.add_option('--json-out',
|
||||
default=False,
|
||||
action='store_true',
|
||||
dest='json_out',
|
||||
help='Print the output from the salt command in json.')
|
||||
parser.add_option('--no-color',
|
||||
default=False,
|
||||
dest='no_color',
|
||||
action='store_true',
|
||||
help='Disable all colored output')
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
opts = {}
|
||||
|
||||
opts['grains_run'] = options.grains
|
||||
opts['module_dirs'] = options.module_dirs.split(',')
|
||||
opts['doc'] = options.doc
|
||||
opts['raw_out'] = options.raw_out
|
||||
opts['txt_out'] = options.txt_out
|
||||
opts['yaml_out'] = options.yaml_out
|
||||
opts['color'] = not options.no_color
|
||||
if JSON:
|
||||
opts['json_out'] = options.json_out
|
||||
else:
|
||||
opts['json_out'] = False
|
||||
opts.update(salt.config.minion_config(options.config))
|
||||
opts['log_level'] = options.log_level
|
||||
if len(args) >= 1:
|
||||
opts['fun'] = args[0]
|
||||
opts['arg'] = args[1:]
|
||||
else:
|
||||
opts['fun'] = ''
|
||||
opts['arg'] = []
|
||||
|
||||
return opts
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Execute the salt call!
|
||||
'''
|
||||
import salt.log
|
||||
salt.log.setup_console_logger(
|
||||
self.opts['log_level']
|
||||
)
|
||||
import logging
|
||||
caller = salt.cli.caller.Caller(self.opts)
|
||||
caller.run()
|
||||
|
||||
class SaltRun(object):
|
||||
'''
|
||||
Used to execute salt convenience functions
|
||||
'''
|
||||
def __init__(self):
|
||||
self.opts = self.__parse()
|
||||
|
||||
def __parse(self):
|
||||
'''
|
||||
Parse the command line arguments
|
||||
'''
|
||||
parser = optparse.OptionParser()
|
||||
|
||||
parser.add_option('-c',
|
||||
'--config',
|
||||
dest='config',
|
||||
default='/etc/salt/master',
|
||||
help='Change the location of the master configuration;'\
|
||||
+ ' default=/etc/salt/master')
|
||||
|
||||
parser.add_option('-d',
|
||||
'--doc',
|
||||
'--documentation',
|
||||
dest='doc',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Display documentation for runners, pass a module or a '\
|
||||
+ ' runner to see documentation on only that module/runner')
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
opts = {}
|
||||
|
||||
opts['config'] = options.config
|
||||
opts['doc'] = options.doc
|
||||
|
||||
if len(args) > 0:
|
||||
opts['fun'] = args[0]
|
||||
else:
|
||||
opts['fun'] = ''
|
||||
if len(args) > 1:
|
||||
opts['arg'] = args[1:]
|
||||
else:
|
||||
opts['arg'] = []
|
||||
|
||||
opts.update(salt.config.master_config(options.config))
|
||||
|
||||
return opts
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Execute the salt call!
|
||||
'''
|
||||
runner = salt.runner.Runner(self.opts)
|
||||
runner.run()
|
|
@ -1,85 +0,0 @@
|
|||
'''
|
||||
The caller module is used as a front-end to manage direct calls to the salt
|
||||
minion modules.
|
||||
'''
|
||||
# Import python modules
|
||||
import os
|
||||
import pprint
|
||||
# Import salt libs
|
||||
import salt.loader
|
||||
import salt.minion
|
||||
import salt
|
||||
|
||||
|
||||
class Caller(object):
|
||||
'''
|
||||
Object to wrap the calling of local salt modules for the salt-call command
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
'''
|
||||
Pass in the command line options
|
||||
'''
|
||||
self.opts = opts
|
||||
opts['grains'] = salt.loader.grains(opts)
|
||||
self.minion = salt.minion.SMinion(opts)
|
||||
|
||||
def call(self):
|
||||
'''
|
||||
Call the module
|
||||
'''
|
||||
ret = {}
|
||||
ret['return'] = self.minion.functions[self.opts['fun']](*self.opts['arg'])
|
||||
if hasattr(self.minion.functions[self.opts['fun']], '__outputter__'):
|
||||
oput = self.minion.functions[self.opts['fun']].__outputter__
|
||||
if isinstance(oput, str):
|
||||
ret['out'] = oput
|
||||
return ret
|
||||
|
||||
def print_docs(self):
|
||||
'''
|
||||
Pick up the documentation for all of the modules and print it out.
|
||||
'''
|
||||
docs = {}
|
||||
for name, func in self.minion.functions.items():
|
||||
if not docs.has_key(name):
|
||||
if func.__doc__:
|
||||
docs[name] = func.__doc__
|
||||
for name in sorted(docs):
|
||||
if name.startswith(self.opts['fun']):
|
||||
print '{0}:\n{1}\n'.format(name, docs[name])
|
||||
|
||||
def print_grains(self):
|
||||
'''
|
||||
Print out the grains
|
||||
'''
|
||||
grains = salt.loader.grains(self.opts)
|
||||
pprint.pprint(grains)
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Execute the salt call logic
|
||||
'''
|
||||
if self.opts['doc']:
|
||||
self.print_docs()
|
||||
elif self.opts['grains_run']:
|
||||
self.print_grains()
|
||||
else:
|
||||
ret = self.call()
|
||||
# Determine the proper output method and run it
|
||||
get_outputter = salt.output.get_outputter
|
||||
if self.opts['raw_out']:
|
||||
printout = get_outputter('raw')
|
||||
elif self.opts['json_out']:
|
||||
printout = get_outputter('json')
|
||||
elif self.opts['txt_out']:
|
||||
printout = get_outputter('txt')
|
||||
elif self.opts['yaml_out']:
|
||||
printout = get_outputter('yaml')
|
||||
elif ret.has_key('out'):
|
||||
printout = get_outputter(ret['out'])
|
||||
else:
|
||||
printout = get_outputter(None)
|
||||
|
||||
printout({'local': ret['return']}, color=self.opts['color'])
|
||||
|
||||
|
79
debian/salt/usr/share/pyshared/salt/cli/cp.py
vendored
79
debian/salt/usr/share/pyshared/salt/cli/cp.py
vendored
|
@ -1,79 +0,0 @@
|
|||
'''
|
||||
The cp module is used to execute the logic used by the salt-cp command
|
||||
line application, salt-cp is NOT intended to broadcast large files, it is
|
||||
intended to handle text files.
|
||||
Salt-cp can be used to distribute configuration files
|
||||
'''
|
||||
# Import python modules
|
||||
import os
|
||||
import sys
|
||||
# Import third party libs
|
||||
import yaml
|
||||
# Import salt modules
|
||||
import salt.client
|
||||
|
||||
class SaltCP(object):
|
||||
'''
|
||||
Create a salt cp object, used to distribute simple files with salt
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
|
||||
def _file_dict(self, fn_):
|
||||
'''
|
||||
Take a path and return the contents of the file as a string
|
||||
'''
|
||||
if not os.path.isfile(fn_):
|
||||
err = 'The referenced file, ' + fn_ + ' is not available.'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
return {fn_: open(fn_, 'r').read()}
|
||||
|
||||
def _recurse_dir(self, fn_, files={}):
|
||||
'''
|
||||
Recursively pull files from a directory
|
||||
'''
|
||||
for base in os.listdir(fn_):
|
||||
path = os.path.join(fn_, base)
|
||||
if os.path.isdir(path):
|
||||
files.update(self._recurse_dir(path))
|
||||
else:
|
||||
files.update(self._file_dict(path))
|
||||
return files
|
||||
|
||||
def _load_files(self):
|
||||
'''
|
||||
Parse the files indicated in opts['src'] and load them into a python
|
||||
object for transport
|
||||
'''
|
||||
files = {}
|
||||
for fn_ in self.opts['src']:
|
||||
if os.path.isfile(fn_):
|
||||
files.update(self._file_dict(fn_))
|
||||
elif os.path.isdir(fn_):
|
||||
print fn_ + ' is a directory, only files are supported.'
|
||||
#files.update(self._recurse_dir(fn_))
|
||||
return files
|
||||
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Make the salt client call
|
||||
'''
|
||||
arg = [self._load_files(), self.opts['dest']]
|
||||
local = salt.client.LocalClient(self.opts['conf_file'])
|
||||
args = [self.opts['tgt'],
|
||||
'cp.recv',
|
||||
arg,
|
||||
self.opts['timeout'],
|
||||
]
|
||||
if self.opts['pcre']:
|
||||
args.append('pcre')
|
||||
elif self.opts['list']:
|
||||
args.append('list')
|
||||
elif self.opts['grain']:
|
||||
args.append('grain')
|
||||
|
||||
ret = local.cmd(*args)
|
||||
|
||||
print yaml.dump(ret)
|
185
debian/salt/usr/share/pyshared/salt/cli/key.py
vendored
185
debian/salt/usr/share/pyshared/salt/cli/key.py
vendored
|
@ -1,185 +0,0 @@
|
|||
'''
|
||||
The actual saltkey functional code
|
||||
'''
|
||||
# Import python modules
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
# Import salt modules
|
||||
import salt.utils as utils
|
||||
import salt.crypt
|
||||
|
||||
class Key(object):
|
||||
'''
|
||||
The object that encapsulates saltkey actions
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
|
||||
def _keys(self, key_type, full_path=False):
|
||||
'''
|
||||
Safely return the names of the unaccepted keys, pass True to return the
|
||||
full key paths
|
||||
Returns a set
|
||||
'''
|
||||
ret = set()
|
||||
subdir = ''
|
||||
if key_type == 'pre':
|
||||
subdir = 'minions_pre'
|
||||
elif key_type == 'acc':
|
||||
subdir = 'minions'
|
||||
dir_ = os.path.join(self.opts['pki_dir'], subdir)
|
||||
if not os.path.isdir(dir_):
|
||||
err = 'The ' + subdir + ' directory is not present, ensure that'\
|
||||
+ ' the master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
keys = os.listdir(dir_)
|
||||
if full_path:
|
||||
for key in keys:
|
||||
ret.add(os.path.join(dir_, key))
|
||||
else:
|
||||
ret = set(keys)
|
||||
return ret
|
||||
|
||||
def _list_pre(self):
|
||||
'''
|
||||
List the unaccepted keys
|
||||
'''
|
||||
print utils.LIGHT_RED + 'Unaccepted Keys:' + utils.ENDC
|
||||
for key in sorted(self._keys('pre')):
|
||||
print utils.RED + key + utils.ENDC
|
||||
|
||||
def _list_accepted(self):
|
||||
'''
|
||||
List the accepted public keys
|
||||
'''
|
||||
print utils.LIGHT_GREEN + 'Accepted Keys:' + utils.ENDC
|
||||
for key in sorted(self._keys('acc')):
|
||||
print utils.GREEN + key + utils.ENDC
|
||||
|
||||
def _list_all(self):
|
||||
'''
|
||||
List all keys
|
||||
'''
|
||||
self._list_pre()
|
||||
self._list_accepted()
|
||||
|
||||
def _print_key(self, name):
|
||||
'''
|
||||
Print out the specified public key
|
||||
'''
|
||||
keys = self._keys('pre', True).union(self._keys('acc', True))
|
||||
for key in sorted(keys):
|
||||
if key.endswith(name):
|
||||
print open(key, 'r').read()
|
||||
|
||||
def _print_all(self):
|
||||
'''
|
||||
Print out the public keys, all of em'
|
||||
'''
|
||||
print utils.LIGHT_RED + 'Unaccepted keys:' + utils.ENDC
|
||||
for key in sorted(self._keys('pre', True)):
|
||||
print ' ' + utils.RED + os.path.basename(key) + utils.ENDC
|
||||
print open(key, 'r').read()
|
||||
print utils.LIGHT_GREEN + 'Accepted keys:' + utils.ENDC
|
||||
for key in sorted(self._keys('acc', True)):
|
||||
print ' ' + utils.GREEN + os.path.basename(key) + utils.ENDC
|
||||
print open(key, 'r').read()
|
||||
|
||||
def _accept(self, key):
|
||||
'''
|
||||
Accept a specified host's public key
|
||||
'''
|
||||
pre_dir = os.path.join(self.opts['pki_dir'], 'minions_pre')
|
||||
minions = os.path.join(self.opts['pki_dir'], 'minions')
|
||||
if not os.path.isdir(minions):
|
||||
err = 'The minions directory is not present, ensure that the'\
|
||||
+ ' master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
if not os.path.isdir(pre_dir):
|
||||
err = 'The minions_pre directory is not present, ensure that the'\
|
||||
+ ' master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
pre = os.listdir(pre_dir)
|
||||
if not pre.count(key):
|
||||
err = 'The named host is unavailable, please accept an available'\
|
||||
+ ' key'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(43)
|
||||
shutil.move(os.path.join(pre_dir, key), os.path.join(minions, key))
|
||||
|
||||
def _accept_all(self):
|
||||
'''
|
||||
Accept all keys in pre
|
||||
'''
|
||||
pre_dir = os.path.join(self.opts['pki_dir'], 'minions_pre')
|
||||
minions = os.path.join(self.opts['pki_dir'], 'minions')
|
||||
if not os.path.isdir(minions):
|
||||
err = 'The minions directory is not present, ensure that the'\
|
||||
+ ' master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
if not os.path.isdir(pre_dir):
|
||||
err = 'The minions_pre directory is not present, ensure that the'\
|
||||
+ ' master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
for key in os.listdir(pre_dir):
|
||||
self._accept(key)
|
||||
|
||||
def _delete_key(self):
|
||||
'''
|
||||
Delete a key
|
||||
'''
|
||||
pre_dir = os.path.join(self.opts['pki_dir'], 'minions_pre')
|
||||
minions = os.path.join(self.opts['pki_dir'], 'minions')
|
||||
if not os.path.isdir(minions):
|
||||
err = 'The minions directory is not present, ensure that the'\
|
||||
+ ' master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
if not os.path.isdir(pre_dir):
|
||||
err = 'The minions_pre directory is not present, ensure that the'\
|
||||
+ ' master server has been started'
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
pre = os.path.join(pre_dir, self.opts['delete'])
|
||||
acc = os.path.join(minions, self.opts['delete'])
|
||||
if os.path.exists(pre):
|
||||
os.remove(pre)
|
||||
print 'Removed pending key %s' % self.opts['delete']
|
||||
if os.path.exists(acc):
|
||||
os.remove(acc)
|
||||
print 'Removed accepted key %s' % self.opts['delete']
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Run the logic for saltkey
|
||||
'''
|
||||
if self.opts['gen_keys']:
|
||||
salt.crypt.gen_keys(
|
||||
self.opts['gen_keys_dir'],
|
||||
self.opts['gen_keys'],
|
||||
self.opts['keysize'])
|
||||
return
|
||||
if self.opts['list']:
|
||||
self._list_pre()
|
||||
elif self.opts['list_all']:
|
||||
self._list_all()
|
||||
elif self.opts['print']:
|
||||
self._print_key(self.opts['print'])
|
||||
elif self.opts['print_all']:
|
||||
self._print_all()
|
||||
elif self.opts['accept']:
|
||||
self._accept(self.opts['accept'])
|
||||
elif self.opts['accept_all']:
|
||||
self._accept_all()
|
||||
elif self.opts['delete']:
|
||||
self._delete_key()
|
||||
else:
|
||||
self._list_all()
|
||||
|
||||
|
344
debian/salt/usr/share/pyshared/salt/client.py
vendored
344
debian/salt/usr/share/pyshared/salt/client.py
vendored
|
@ -1,344 +0,0 @@
|
|||
'''
|
||||
The client module is used to create a client connection to the publisher
|
||||
The data structure needs to be:
|
||||
{'enc': 'clear',
|
||||
'load': {'fun': '<mod.callable>',
|
||||
'arg':, ('arg1', 'arg2', ...),
|
||||
'tgt': '<glob or id>',
|
||||
'key': '<read in the key file>'}
|
||||
'''
|
||||
# The components here are simple, and they need to be and stay simple, we
|
||||
# want a client to have 3 external concerns, and maybe a forth configurable
|
||||
# option.
|
||||
# The concerns are
|
||||
# 1. Who executes the command?
|
||||
# 2. what is the function being run?
|
||||
# 3. What arguments need to be passed to the function?
|
||||
# 4. How long do we wait for all of the replies?
|
||||
#
|
||||
# Next there are a number of tasks, first we need some kind of authentication
|
||||
# This Client initially will be the master root client, which will run as the
|
||||
# root user on the master server.
|
||||
# BUT we also want a client to be able to work over the network, so that
|
||||
# controllers can exist within disparate applications.
|
||||
# The problem is that this is a security nightmare, so I am going to start
|
||||
# small, and only start with the ability to execute salt commands locally.
|
||||
# This means that the primary client to build is, the LocalClient
|
||||
|
||||
import os
|
||||
import re
|
||||
import glob
|
||||
import time
|
||||
import datetime
|
||||
import cPickle as pickle
|
||||
|
||||
# Import zmq modules
|
||||
import zmq
|
||||
|
||||
# Import salt modules
|
||||
import salt.config
|
||||
import salt.payload
|
||||
|
||||
|
||||
def prep_jid(cachedir):
|
||||
'''
|
||||
Parses the job return directory, generates a job id and sets up the
|
||||
job id directory.
|
||||
'''
|
||||
jid_root = os.path.join(cachedir, 'jobs')
|
||||
jid = datetime.datetime.strftime(
|
||||
datetime.datetime.now(), '%Y%m%d%H%M%S%f'
|
||||
)
|
||||
jid_dir = os.path.join(jid_root, jid)
|
||||
if not os.path.isdir(jid_dir):
|
||||
os.makedirs(jid_dir)
|
||||
else:
|
||||
return prep_jid(load)
|
||||
return jid
|
||||
|
||||
class SaltClientError(Exception): pass
|
||||
|
||||
class LocalClient(object):
|
||||
'''
|
||||
Connect to the salt master via the local server and via root
|
||||
'''
|
||||
def __init__(self, c_path='/etc/salt/master'):
|
||||
self.opts = salt.config.master_config(c_path)
|
||||
self.key = self.__read_master_key()
|
||||
|
||||
def __read_master_key(self):
|
||||
'''
|
||||
Read in the rotating master authentication key
|
||||
'''
|
||||
try:
|
||||
keyfile = os.path.join(self.opts['cachedir'], '.root_key')
|
||||
key = open(keyfile, 'r').read()
|
||||
return key
|
||||
except:
|
||||
raise SaltClientError('Failed to read in the salt root key')
|
||||
|
||||
def _check_glob_minions(self, expr):
|
||||
'''
|
||||
Return the minions found by looking via globs
|
||||
'''
|
||||
cwd = os.getcwd()
|
||||
os.chdir(os.path.join(self.opts['pki_dir'], 'minions'))
|
||||
ret = set(glob.glob(expr))
|
||||
os.chdir(cwd)
|
||||
return ret
|
||||
|
||||
def _check_list_minions(self, expr):
|
||||
'''
|
||||
Return the minions found by looking via a list
|
||||
'''
|
||||
ret = []
|
||||
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], 'minions')):
|
||||
if expr.count(fn_):
|
||||
if not ret.count(fn_):
|
||||
ret.append(fn_)
|
||||
return ret
|
||||
|
||||
def _check_pcre_minions(self, expr):
|
||||
'''
|
||||
Return the minions found by looking via regular expressions
|
||||
'''
|
||||
ret = set()
|
||||
cwd = os.getcwd()
|
||||
os.chdir(os.path.join(self.opts['pki_dir'], 'minions'))
|
||||
reg = re.compile(expr)
|
||||
for fn_ in os.listdir('.'):
|
||||
if reg.match(fn_):
|
||||
ret.add(fn_)
|
||||
os.chdir(cwd)
|
||||
return ret
|
||||
|
||||
def _check_grain_minions(self, expr):
|
||||
'''
|
||||
Return the minions found by looking via a list
|
||||
'''
|
||||
return os.listdir(os.path.join(self.opts['pki_dir'], 'minions'))
|
||||
|
||||
def cmd(
|
||||
self,
|
||||
tgt,
|
||||
fun,
|
||||
arg=(),
|
||||
timeout=5,
|
||||
expr_form='glob',
|
||||
ret=''):
|
||||
'''
|
||||
Execute a salt command and return.
|
||||
'''
|
||||
jid = prep_jid(self.opts['cachedir'])
|
||||
pub_data = self.pub(
|
||||
tgt,
|
||||
fun,
|
||||
arg,
|
||||
expr_form,
|
||||
ret,
|
||||
jid=jid,
|
||||
timeout=timeout)
|
||||
return self.get_returns(pub_data['jid'], pub_data['minions'], timeout)
|
||||
|
||||
def cmd_full_return(
|
||||
self,
|
||||
tgt,
|
||||
fun,
|
||||
arg=(),
|
||||
timeout=5,
|
||||
expr_form='glob',
|
||||
ret=''):
|
||||
'''
|
||||
Execute a salt command and return
|
||||
'''
|
||||
jid = prep_jid(self.opts['cachedir'])
|
||||
pub_data = self.pub(
|
||||
tgt,
|
||||
fun,
|
||||
arg,
|
||||
expr_form,
|
||||
ret,
|
||||
jid=jid,
|
||||
timeout=timeout)
|
||||
return self.get_full_returns(pub_data['jid'], pub_data['minions'], timeout)
|
||||
|
||||
def get_returns(self, jid, minions, timeout=5):
|
||||
'''
|
||||
This method starts off a watcher looking at the return data for a
|
||||
specified jid
|
||||
'''
|
||||
jid_dir = os.path.join(self.opts['cachedir'], 'jobs', jid)
|
||||
start = 999999999999
|
||||
gstart = int(time.time())
|
||||
ret = {}
|
||||
# Wait for the hosts to check in
|
||||
while True:
|
||||
for fn_ in os.listdir(jid_dir):
|
||||
if fn_.startswith('.'):
|
||||
continue
|
||||
if not ret.has_key(fn_):
|
||||
retp = os.path.join(jid_dir, fn_, 'return.p')
|
||||
if not os.path.isfile(retp):
|
||||
continue
|
||||
while not ret.has_key(fn_):
|
||||
try:
|
||||
ret[fn_] = pickle.load(open(retp, 'r'))
|
||||
except:
|
||||
pass
|
||||
if ret and start == 999999999999:
|
||||
start = int(time.time())
|
||||
if len(ret) >= len(minions):
|
||||
return ret
|
||||
if int(time.time()) > start + timeout:
|
||||
return ret
|
||||
if int(time.time()) > gstart + timeout and not ret:
|
||||
# No minions have replied within the specified global timeout,
|
||||
# return an empty dict
|
||||
return ret
|
||||
time.sleep(0.02)
|
||||
|
||||
def get_full_returns(self, jid, minions, timeout=5):
|
||||
'''
|
||||
This method starts off a watcher looking at the return data for a
|
||||
specified jid, it returns all of the information for the jid
|
||||
'''
|
||||
jid_dir = os.path.join(self.opts['cachedir'], 'jobs', jid)
|
||||
start = 999999999999
|
||||
gstart = int(time.time())
|
||||
ret = {}
|
||||
# Wait for the hosts to check in
|
||||
while True:
|
||||
for fn_ in os.listdir(jid_dir):
|
||||
if fn_.startswith('.'):
|
||||
continue
|
||||
if not ret.has_key(fn_):
|
||||
retp = os.path.join(jid_dir, fn_, 'return.p')
|
||||
outp = os.path.join(jid_dir, fn_, 'out.p')
|
||||
if not os.path.isfile(retp):
|
||||
continue
|
||||
while not ret.has_key(fn_):
|
||||
try:
|
||||
ret_data = pickle.load(open(retp, 'r'))
|
||||
ret[fn_] = {'ret': ret_data}
|
||||
if os.path.isfile(outp):
|
||||
ret[fn_]['out'] = pickle.load(open(outp, 'r'))
|
||||
except:
|
||||
pass
|
||||
if ret and start == 999999999999:
|
||||
start = int(time.time())
|
||||
if len(ret) >= len(minions):
|
||||
return ret
|
||||
if int(time.time()) > start + timeout:
|
||||
return ret
|
||||
if int(time.time()) > gstart + timeout and not ret:
|
||||
# No minions have replied within the specified global timeout,
|
||||
# return an empty dict
|
||||
return ret
|
||||
time.sleep(0.02)
|
||||
|
||||
def find_cmd(self, cmd):
|
||||
'''
|
||||
Hunt through the old salt calls for when cmd was run, return a dict:
|
||||
{'<jid>': <return_obj>}
|
||||
'''
|
||||
job_dir = os.path.join(self.opts['cachedir'], 'jobs')
|
||||
ret = {}
|
||||
for jid in os.listdir(job_dir):
|
||||
jid_dir = os.path.join(job_dir, jid)
|
||||
loadp = os.path.join(jid_dir, '.load.p')
|
||||
if os.path.isfile(loadp):
|
||||
try:
|
||||
load = pickle.load(open(loadp, 'r'))
|
||||
if load['fun'] == cmd:
|
||||
# We found a match! Add the return values
|
||||
ret[jid] = {}
|
||||
for host in os.listdir(jid_dir):
|
||||
host_dir = os.path.join(jid_dir, host)
|
||||
retp = os.path.join(host_dir, 'return.p')
|
||||
if not os.path.isfile(retp):
|
||||
continue
|
||||
ret[jid][host] = pickle.load(open(retp))
|
||||
except:
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
return ret
|
||||
|
||||
def check_minions(self, expr, expr_form='glob'):
|
||||
'''
|
||||
Check the passed regex against the available minions' public
|
||||
keys stored for authentication. This should return a set of ids
|
||||
which match the regex, this will then be used to parse the
|
||||
returns to make sure everyone has checked back in.
|
||||
'''
|
||||
return {'glob': self._check_glob_minions,
|
||||
'pcre': self._check_pcre_minions,
|
||||
'list': self._check_list_minions,
|
||||
'grain': self._check_grain_minions,
|
||||
'exsel': self._check_grain_minions,
|
||||
}[expr_form](expr)
|
||||
|
||||
def pub(self, tgt, fun, arg=(), expr_form='glob', ret='', jid='', timeout=5):
|
||||
'''
|
||||
Take the required arguments and publish the given command.
|
||||
Arguments:
|
||||
tgt:
|
||||
The tgt is a regex or a glob used to match up the ids on
|
||||
the minions. Salt works by always publishing every command to
|
||||
all of the minions and then the minions determine if the
|
||||
command is for them based on the tgt value.
|
||||
fun:
|
||||
The function name to be called on the remote host(s), this must
|
||||
be a string in the format "<modulename>.<function name>"
|
||||
arg:
|
||||
The arg option needs to be a tuple of arguments to pass to the
|
||||
calling function, if left blank
|
||||
Returns:
|
||||
jid:
|
||||
A string, as returned by the publisher, which is the job id,
|
||||
this will inform the client where to get the job results
|
||||
minions:
|
||||
A set, the targets that the tgt passed should match.
|
||||
'''
|
||||
# Run a check_minions, if no minions match return False
|
||||
# format the payload - make a function that does this in the payload
|
||||
# module
|
||||
# make the zmq client
|
||||
# connect to the req server
|
||||
# send!
|
||||
# return what we get back
|
||||
minions = self.check_minions(tgt, expr_form)
|
||||
if not minions:
|
||||
return {'jid': '',
|
||||
'minions': minions}
|
||||
if self.opts['order_masters']:
|
||||
package = salt.payload.format_payload(
|
||||
'clear',
|
||||
cmd='publish',
|
||||
tgt=tgt,
|
||||
fun=fun,
|
||||
arg=arg,
|
||||
key=self.key,
|
||||
tgt_type=expr_form,
|
||||
ret=ret,
|
||||
jid=jid,
|
||||
to=timeout)
|
||||
else:
|
||||
package = salt.payload.format_payload(
|
||||
'clear',
|
||||
cmd='publish',
|
||||
tgt=tgt,
|
||||
fun=fun,
|
||||
arg=arg,
|
||||
key=self.key,
|
||||
tgt_type=expr_form,
|
||||
jid=jid,
|
||||
ret=ret)
|
||||
# Prep zmq
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.REQ)
|
||||
socket.connect('tcp://%(interface)s:%(ret_port)s' % self.opts)
|
||||
socket.send(package)
|
||||
payload = salt.payload.unpackage(socket.recv())
|
||||
return {'jid': payload['load']['jid'],
|
||||
'minions': minions}
|
167
debian/salt/usr/share/pyshared/salt/config.py
vendored
167
debian/salt/usr/share/pyshared/salt/config.py
vendored
|
@ -1,167 +0,0 @@
|
|||
'''
|
||||
All salt configuration loading and defaults should be in this module
|
||||
'''
|
||||
# Import python modules
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
# Import third party libs
|
||||
import yaml
|
||||
# Import salt libs
|
||||
import salt.crypt
|
||||
import salt.loader
|
||||
import salt.utils
|
||||
|
||||
def load_config(opts, path, env_var):
|
||||
'''
|
||||
Attempts to update ``opts`` dict by parsing either the file described by
|
||||
``path`` or the environment variable described by ``env_var`` as YAML.
|
||||
'''
|
||||
|
||||
if not path or not os.path.isfile(path):
|
||||
path = os.environ.get(env_var, '')
|
||||
|
||||
if os.path.isfile(path):
|
||||
try:
|
||||
conf_opts = yaml.load(open(path, 'r'))
|
||||
if conf_opts == None:
|
||||
# The config file is empty and the yaml.load returned None
|
||||
conf_opts = {}
|
||||
opts.update(conf_opts)
|
||||
opts['conf_file'] = path
|
||||
except Exception, e:
|
||||
print 'Error parsing configuration file: {0} - {1}'.format(path, e)
|
||||
else:
|
||||
print 'Missing configuration file: {0}'.format(path)
|
||||
|
||||
def prepend_root_dir(opts, path_options):
|
||||
'''
|
||||
Prepends the options that represent filesystem paths with value of the
|
||||
'root_dir' option.
|
||||
'''
|
||||
for path_option in path_options:
|
||||
opts[path_option] = os.path.normpath(
|
||||
os.sep.join([opts['root_dir'], opts[path_option]]))
|
||||
|
||||
def minion_config(path):
|
||||
'''
|
||||
Reads in the minion configuration file and sets up special options
|
||||
'''
|
||||
opts = {'master': 'salt',
|
||||
'master_port': '4506',
|
||||
'root_dir': '/',
|
||||
'pki_dir': '/etc/salt/pki',
|
||||
'id': socket.getfqdn(),
|
||||
'cachedir': '/var/cache/salt',
|
||||
'conf_file': path,
|
||||
'renderer': 'yaml_jinja',
|
||||
'disable_modules': [],
|
||||
'disable_returners': [],
|
||||
'module_dirs': [],
|
||||
'returner_dirs': [],
|
||||
'states_dirs': [],
|
||||
'render_dirs': [],
|
||||
'open_mode': False,
|
||||
'multiprocessing': True,
|
||||
'log_file': '/var/log/salt/minion',
|
||||
'log_level': 'warning',
|
||||
'log_granular_levels': {},
|
||||
'test': False,
|
||||
'cython_enable': False,
|
||||
}
|
||||
|
||||
load_config(opts, path, 'SALT_MINION_CONFIG')
|
||||
|
||||
opts['master_ip'] = dns_check(opts['master'])
|
||||
|
||||
opts['master_uri'] = 'tcp://' + opts['master_ip'] + ':'\
|
||||
+ str(opts['master_port'])
|
||||
|
||||
# Enabling open mode requires that the value be set to True, and nothing
|
||||
# else!
|
||||
if opts['open_mode']:
|
||||
if opts['open_mode'] == True:
|
||||
opts['open_mode'] = True
|
||||
else:
|
||||
opts['open_mode'] = False
|
||||
|
||||
opts['grains'] = salt.loader.grains(opts)
|
||||
|
||||
# Prepend root_dir to other paths
|
||||
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file'])
|
||||
|
||||
return opts
|
||||
|
||||
def master_config(path):
|
||||
'''
|
||||
Reads in the master configuration file and sets up default options
|
||||
'''
|
||||
opts = {'interface': '0.0.0.0',
|
||||
'publish_port': '4505',
|
||||
'worker_threads': 5,
|
||||
'sock_dir': '/tmp/.salt-unix',
|
||||
'ret_port': '4506',
|
||||
'keep_jobs': 24,
|
||||
'root_dir': '/',
|
||||
'pki_dir': '/etc/salt/pki',
|
||||
'cachedir': '/var/cache/salt',
|
||||
'file_roots': {
|
||||
'base': ['/srv/salt'],
|
||||
},
|
||||
'file_buffer_size': 1048576,
|
||||
'hash_type': 'md5',
|
||||
'conf_file': path,
|
||||
'open_mode': False,
|
||||
'auto_accept': False,
|
||||
'renderer': 'yaml_jinja',
|
||||
'state_top': 'top.sls',
|
||||
'order_masters': False,
|
||||
'log_file': '/var/log/salt/master',
|
||||
'log_level': 'warning',
|
||||
'log_granular_levels': {},
|
||||
'cluster_masters': [],
|
||||
'cluster_mode': 'paranoid',
|
||||
}
|
||||
|
||||
load_config(opts, path, 'SALT_MASTER_CONFIG')
|
||||
|
||||
opts['aes'] = salt.crypt.Crypticle.generate_key_string()
|
||||
|
||||
# Prepend root_dir to other paths
|
||||
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file'])
|
||||
|
||||
# Enabling open mode requires that the value be set to True, and nothing
|
||||
# else!
|
||||
if opts['open_mode']:
|
||||
if opts['open_mode'] == True:
|
||||
opts['open_mode'] = True
|
||||
else:
|
||||
opts['open_mode'] = False
|
||||
if opts['auto_accept']:
|
||||
if opts['auto_accept'] == True:
|
||||
opts['auto_accept'] = True
|
||||
else:
|
||||
opts['auto_accept'] = False
|
||||
return opts
|
||||
|
||||
def dns_check(addr):
|
||||
'''
|
||||
Verify that the passed address is valid and return the ipv4 addr if it is
|
||||
a hostname
|
||||
'''
|
||||
try:
|
||||
socket.inet_aton(addr)
|
||||
# is a valid ip addr
|
||||
except socket.error:
|
||||
# Not a valid ip addr, check if it is an available hostname
|
||||
try:
|
||||
addr = socket.gethostbyname(addr)
|
||||
except socket.gaierror:
|
||||
# Woah, this addr is totally bogus, die!!!
|
||||
err = 'The master address {0} could not be validated, please'\
|
||||
+ ' check that the specified master in the minion config'\
|
||||
+ ' file is correct\n'
|
||||
err = err.format(addr)
|
||||
sys.stderr.write(err)
|
||||
sys.exit(42)
|
||||
return addr
|
326
debian/salt/usr/share/pyshared/salt/crypt.py
vendored
326
debian/salt/usr/share/pyshared/salt/crypt.py
vendored
|
@ -1,326 +0,0 @@
|
|||
'''
|
||||
The crypt module manages all of the cyptogophy functions for minions and
|
||||
masters, encrypting and decrypting payloads, preparing messages, and
|
||||
authenticating peers
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import sys
|
||||
import hmac
|
||||
import logging
|
||||
import tempfile
|
||||
import random
|
||||
import hashlib
|
||||
import time
|
||||
import string
|
||||
import cPickle as pickle
|
||||
# Import Cryptography libs
|
||||
from M2Crypto import RSA
|
||||
from Crypto.Cipher import AES
|
||||
# Import zeromq libs
|
||||
import zmq
|
||||
# Import salt utils
|
||||
import salt.utils
|
||||
import salt.payload
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def foo_pass(self, data=''):
|
||||
'''
|
||||
used as a workaround for the no-passphrase issue in M2Crypto.RSA
|
||||
'''
|
||||
return 'foo'
|
||||
|
||||
def gen_keys(keydir, keyname, keysize):
|
||||
'''
|
||||
Generate a keypair for use with salt
|
||||
'''
|
||||
base = os.path.join(keydir, keyname)
|
||||
priv = '{0}.pem'.format(base)
|
||||
pub = '{0}.pub'.format(base)
|
||||
gen = RSA.gen_key(keysize, 1)
|
||||
gen.save_key(priv, callback=foo_pass)
|
||||
gen.save_pub_key(pub)
|
||||
key = RSA.load_key(priv, callback=foo_pass)
|
||||
os.chmod(priv, 256)
|
||||
return key
|
||||
|
||||
class MasterKeys(dict):
|
||||
'''
|
||||
The Master Keys class is used to manage the public key pair used for
|
||||
authentication by the master.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
self.pub_path = os.path.join(self.opts['pki_dir'], 'master.pub')
|
||||
self.rsa_path = os.path.join(self.opts['pki_dir'], 'master.pem')
|
||||
self.key = self.__get_priv_key()
|
||||
self.pub_str = self.__get_pub_str()
|
||||
self.token = self.__gen_token()
|
||||
|
||||
def __get_priv_key(self):
|
||||
'''
|
||||
Returns a private key object for the master
|
||||
'''
|
||||
key = None
|
||||
try:
|
||||
key = RSA.load_key(self.rsa_path, callback=foo_pass)
|
||||
log.debug('Loaded master key: %s', self.rsa_path)
|
||||
except:
|
||||
log.info('Generating master key: %s', self.rsa_path)
|
||||
key = gen_keys(self.opts['pki_dir'], 'master', 4096)
|
||||
return key
|
||||
|
||||
def __get_pub_str(self):
|
||||
'''
|
||||
Returns the string contents of the public key
|
||||
'''
|
||||
if not os.path.isfile(self.pub_path):
|
||||
key = self.__get_priv_key()
|
||||
key.save_pub_key(self.pub_path)
|
||||
return open(self.pub_path, 'r').read()
|
||||
|
||||
def __gen_token(self):
|
||||
'''
|
||||
Generate the authentication token
|
||||
'''
|
||||
return self.key.private_encrypt('salty bacon', 5)
|
||||
|
||||
|
||||
class Auth(object):
|
||||
'''
|
||||
The Auth class provides the sequence for setting up communication with the
|
||||
master server from a minion.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
|
||||
if self.opts.has_key('syndic_master'):
|
||||
self.mpub = 'syndic_master.pub'
|
||||
elif self.opts.has_key('alert_master'):
|
||||
self.mpub = 'monitor_master.pub'
|
||||
else:
|
||||
self.mpub = 'minion_master.pub'
|
||||
|
||||
def get_priv_key(self):
|
||||
'''
|
||||
Returns a private key object for the minion
|
||||
'''
|
||||
key = None
|
||||
try:
|
||||
key = RSA.load_key(self.rsa_path, callback=foo_pass)
|
||||
log.debug('Loaded minion key: %s', self.rsa_path)
|
||||
except:
|
||||
log.info('Generating minion key: %s', self.rsa_path)
|
||||
key = gen_keys(self.opts['pki_dir'], 'minion', 4096)
|
||||
return key
|
||||
|
||||
def minion_sign_in_payload(self):
|
||||
'''
|
||||
Generates the payload used to authenticate with the master server. This
|
||||
payload consists of the passed in id_ and the ssh public key to encrypt
|
||||
the AES key sent back form the master.
|
||||
'''
|
||||
payload = {}
|
||||
key = self.get_priv_key()
|
||||
tmp_pub = tempfile.mktemp()
|
||||
key.save_pub_key(tmp_pub)
|
||||
payload['enc'] = 'clear'
|
||||
payload['load'] = {}
|
||||
payload['load']['cmd'] = '_auth'
|
||||
payload['load']['id'] = self.opts['id']
|
||||
payload['load']['pub'] = open(tmp_pub, 'r').read()
|
||||
os.remove(tmp_pub)
|
||||
return payload
|
||||
|
||||
def decrypt_aes(self, aes):
|
||||
'''
|
||||
This function is used to decrypt the aes seed phrase returned from the
|
||||
master server, the seed phrase is decrypted with the ssh rsa host key.
|
||||
Pass in the encrypted aes key.
|
||||
Returns the decrypted aes seed key, a string
|
||||
'''
|
||||
log.info('Decrypting the current master AES key')
|
||||
key = self.get_priv_key()
|
||||
return key.private_decrypt(aes, 4)
|
||||
|
||||
def verify_master(self, master_pub, token):
|
||||
'''
|
||||
Takes the master pubkey and compares it to the saved master pubkey,
|
||||
the token is encrypted with the master private key and must be
|
||||
decrypted successfully to verify that the master has been connected to.
|
||||
The token must decrypt with the public key, and it must say:
|
||||
'salty bacon'
|
||||
returns a bool
|
||||
'''
|
||||
tmp_pub = tempfile.mktemp()
|
||||
open(tmp_pub, 'w+').write(master_pub)
|
||||
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
|
||||
pub = RSA.load_pub_key(tmp_pub)
|
||||
os.remove(tmp_pub)
|
||||
if os.path.isfile(m_pub_fn) and not self.opts['open_mode']:
|
||||
local_master_pub = open(m_pub_fn).read()
|
||||
if not master_pub == local_master_pub:
|
||||
# This is not the last master we connected to
|
||||
log.error('The master key has changed, the salt master could '
|
||||
'have been subverted, verify salt master\'s public '
|
||||
'key')
|
||||
return False
|
||||
else:
|
||||
open(m_pub_fn, 'w+').write(master_pub)
|
||||
if pub.public_decrypt(token, 5) == 'salty bacon':
|
||||
return True
|
||||
log.error('The salt master has failed verification for an unknown '
|
||||
'reason, verify your salt keys')
|
||||
return False
|
||||
|
||||
def sign_in(self):
|
||||
'''
|
||||
Send a sign in request to the master, sets the key information and
|
||||
returns a dict containing the master publish interface to bind to
|
||||
and the decrypted aes key for transport decryption.
|
||||
'''
|
||||
auth = {}
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.REQ)
|
||||
socket.connect(self.opts['master_uri'])
|
||||
payload = salt.payload.package(self.minion_sign_in_payload())
|
||||
socket.send(payload)
|
||||
payload = salt.payload.unpackage(socket.recv())
|
||||
if payload.has_key('load'):
|
||||
if payload['load'].has_key('ret'):
|
||||
if not payload['load']['ret']:
|
||||
log.critical(
|
||||
'The Salt Master has rejected this minion\'s public '
|
||||
'key!\nTo repair this issue, delete the public key for '
|
||||
'this minion on the Salt Master and restart this '
|
||||
'minion.\nOr restart the Salt Master in open mode to '
|
||||
'clean out the keys. The Salt Minion will now exit.'
|
||||
)
|
||||
sys.exit(42)
|
||||
else:
|
||||
log.error(
|
||||
'The Salt Master has cached the public key for this '
|
||||
'node, this salt minion will wait for 10 seconds '
|
||||
'before attempting to re-authenticate'
|
||||
)
|
||||
return 'retry'
|
||||
if not self.verify_master(payload['pub_key'], payload['token']):
|
||||
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
|
||||
log.critical(
|
||||
'The Salt Master server\'s public key did not authenticate!\n'
|
||||
'If you are confident that you are connecting to a valid Salt '
|
||||
'Master, then remove the master public key and restart the '
|
||||
'Salt Minion.\nThe master public key can be found at:\n%s',
|
||||
m_pub_fn
|
||||
)
|
||||
sys.exit(42)
|
||||
auth['aes'] = self.decrypt_aes(payload['aes'])
|
||||
auth['publish_port'] = payload['publish_port']
|
||||
return auth
|
||||
|
||||
|
||||
class AuthenticationError(Exception): pass
|
||||
|
||||
|
||||
class Crypticle(object):
|
||||
'''
|
||||
Authenticated encryption class
|
||||
|
||||
Encryption algorithm: AES-CBC
|
||||
Signing algorithm: HMAC-SHA256
|
||||
'''
|
||||
|
||||
PICKLE_PAD = 'pickle::'
|
||||
AES_BLOCK_SIZE = 16
|
||||
SIG_SIZE = hashlib.sha256().digest_size
|
||||
|
||||
def __init__(self, key_string, key_size=192):
|
||||
self.keys = self.extract_keys(key_string, key_size)
|
||||
self.key_size = key_size
|
||||
|
||||
@classmethod
|
||||
def generate_key_string(cls, key_size=192):
|
||||
key = os.urandom(key_size / 8 + cls.SIG_SIZE)
|
||||
return key.encode('base64').replace('\n', '')
|
||||
|
||||
@classmethod
|
||||
def extract_keys(cls, key_string, key_size):
|
||||
key = key_string.decode('base64')
|
||||
assert len(key) == key_size / 8 + cls.SIG_SIZE, 'invalid key'
|
||||
return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:]
|
||||
|
||||
def encrypt(self, data):
|
||||
'''
|
||||
encrypt data with AES-CBC and sign it with HMAC-SHA256
|
||||
'''
|
||||
aes_key, hmac_key = self.keys
|
||||
pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE
|
||||
data = data + pad * chr(pad)
|
||||
iv_bytes = os.urandom(self.AES_BLOCK_SIZE)
|
||||
cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
|
||||
data = iv_bytes + cypher.encrypt(data)
|
||||
sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
|
||||
return data + sig
|
||||
|
||||
def decrypt(self, data):
|
||||
'''
|
||||
verify HMAC-SHA256 signature and decrypt data with AES-CBC
|
||||
'''
|
||||
aes_key, hmac_key = self.keys
|
||||
sig = data[-self.SIG_SIZE:]
|
||||
data = data[:-self.SIG_SIZE]
|
||||
if hmac.new(hmac_key, data, hashlib.sha256).digest() != sig:
|
||||
raise AuthenticationError('message authentication failed')
|
||||
iv_bytes = data[:self.AES_BLOCK_SIZE]
|
||||
data = data[self.AES_BLOCK_SIZE:]
|
||||
cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
|
||||
data = cypher.decrypt(data)
|
||||
return data[:-ord(data[-1])]
|
||||
|
||||
def dumps(self, obj, pickler=pickle):
|
||||
'''
|
||||
pickle and encrypt a python object
|
||||
'''
|
||||
return self.encrypt(self.PICKLE_PAD + pickler.dumps(obj))
|
||||
|
||||
def loads(self, data, pickler=pickle):
|
||||
'''
|
||||
decrypt and un-pickle a python object
|
||||
'''
|
||||
data = self.decrypt(data)
|
||||
# simple integrity check to verify that we got meaningful data
|
||||
assert data.startswith(self.PICKLE_PAD), 'unexpected header'
|
||||
return pickler.loads(data[len(self.PICKLE_PAD):])
|
||||
|
||||
|
||||
class SAuth(Auth):
|
||||
'''
|
||||
Set up an object to maintain the standalone authentication session with
|
||||
the salt master
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
super(SAuth, self).__init__(opts)
|
||||
self.crypticle = self.__authenticate()
|
||||
|
||||
def __authenticate(self):
|
||||
'''
|
||||
Authenticate with the master, this method breaks the functional
|
||||
paradigm, it will update the master information from a fresh sign in,
|
||||
signing in can occur as often as needed to keep up with the revolving
|
||||
master aes key.
|
||||
'''
|
||||
creds = self.sign_in()
|
||||
if creds == 'retry':
|
||||
print 'Failed to authenticate with the master, verify that this'\
|
||||
+ ' minion\'s public key has been accepted on the salt master'
|
||||
sys.exit(2)
|
||||
return Crypticle(creds['aes'])
|
||||
|
||||
def gen_token(self, clear_tok):
|
||||
'''
|
||||
Encrypt a string with the minion private key to verify identity with
|
||||
the master.
|
||||
'''
|
||||
return self.get_priv_key().private_encrypt(clear_tok, 5)
|
|
@ -1,3 +0,0 @@
|
|||
'''
|
||||
Salt extension packages.
|
||||
'''
|
218
debian/salt/usr/share/pyshared/salt/grains/core.py
vendored
218
debian/salt/usr/share/pyshared/salt/grains/core.py
vendored
|
@ -1,218 +0,0 @@
|
|||
'''
|
||||
The static grains, these are the core, or built in grains.
|
||||
|
||||
When grains are loaded they are not loaded in the same way that modules are
|
||||
loaded, grain functions are detected and executed, the functions MUST
|
||||
return a dict which will be applied to the main grains dict. This module
|
||||
will always be executed first, so that any grains loaded here in the core
|
||||
module can be overwritten just by returning dict keys with the same value
|
||||
as those returned here
|
||||
'''
|
||||
# This needs some refactoring, I made it "as fast as I could" and could be a
|
||||
# lot clearer, so far it is spaghetti code
|
||||
# Import python modules
|
||||
import os
|
||||
import socket
|
||||
import subprocess
|
||||
|
||||
def _kernel():
|
||||
'''
|
||||
Return the kernel type
|
||||
'''
|
||||
# Provides:
|
||||
# kernel
|
||||
grains = {}
|
||||
grains['kernel'] = subprocess.Popen(['uname', '-s'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
if grains['kernel'] == 'aix':
|
||||
grains['kernelrelease'] = subprocess.Popen(['oslevel', '-s'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
else:
|
||||
grains['kernelrelease'] = subprocess.Popen(['uname', '-r'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
if not grains.has_key('kernel'):
|
||||
grains['kernel'] = 'Unknown'
|
||||
if not grains['kernel']:
|
||||
grains['kernel'] = 'Unknown'
|
||||
return grains
|
||||
|
||||
def _cpudata():
|
||||
'''
|
||||
Return the cpu architecture
|
||||
'''
|
||||
# Provides:
|
||||
# cpuarch
|
||||
# num_cpus
|
||||
# cpu_model
|
||||
# cpu_flags
|
||||
grains = {}
|
||||
cpuinfo = '/proc/cpuinfo'
|
||||
# Grab the Arch
|
||||
arch = subprocess.Popen(['uname', '-m'],
|
||||
stdout=subprocess.PIPE).communicate()[0].strip()
|
||||
grains['cpuarch'] = arch
|
||||
if not grains['cpuarch']:
|
||||
grains['cpuarch'] = 'Unknown'
|
||||
# Parse over the cpuinfo file
|
||||
if os.path.isfile(cpuinfo):
|
||||
for line in open(cpuinfo, 'r').readlines():
|
||||
comps = line.split(':')
|
||||
if not len(comps) > 1:
|
||||
continue
|
||||
if comps[0].strip() == 'processor':
|
||||
grains['num_cpus'] = int(comps[1].strip()) + 1
|
||||
elif comps[0].strip() == 'model name':
|
||||
grains['cpu_model'] = comps[1].strip()
|
||||
elif comps[0].strip() == 'flags':
|
||||
grains['cpu_flags'] = comps[1].split()
|
||||
if not grains.has_key('num_cpus'):
|
||||
grains['num_cpus'] = 0
|
||||
if not grains.has_key('cpu_model'):
|
||||
grains['cpu_model'] = 'Unknown'
|
||||
if not grains.has_key('cpu_flags'):
|
||||
grains['cpu_flags'] = []
|
||||
return grains
|
||||
|
||||
def _virtual(osdata):
|
||||
'''
|
||||
Returns what type of virtual hardware is under the hood, kvm or physical
|
||||
'''
|
||||
# This is going to be a monster, if you are running a vm you can test this
|
||||
# grain with please submit patches!
|
||||
# Provides:
|
||||
# virtual
|
||||
grains = {'virtual': 'physical'}
|
||||
if 'Linux FreeBSD OpenBSD SunOS HP-UX GNU/kFreeBSD'.count(osdata['kernel']):
|
||||
if os.path.isdir('/proc/vz'):
|
||||
if os.path.isfile('/proc/vz/version'):
|
||||
grains['virtual'] = 'openvzhn'
|
||||
else:
|
||||
grains['virtual'] = 'openvzve'
|
||||
if os.path.isdir('/.SUNWnative'):
|
||||
grains['virtual'] = 'zone'
|
||||
if os.path.isfile('/proc/cpuinfo'):
|
||||
if open('/proc/cpuinfo', 'r').read().count('QEMU Virtual CPU'):
|
||||
grains['virtual'] = 'kvm'
|
||||
return grains
|
||||
|
||||
def _ps(osdata):
|
||||
'''
|
||||
Return the ps grain
|
||||
'''
|
||||
grains = {}
|
||||
grains['ps'] = 'ps auxwww' if\
|
||||
'FreeBSD NetBSD OpenBSD Darwin'.count(osdata['os']) else 'ps -ef'
|
||||
return grains
|
||||
|
||||
def os_data():
|
||||
'''
|
||||
Return grains pertaining to the operating system
|
||||
'''
|
||||
grains = {}
|
||||
grains.update(_kernel())
|
||||
grains.update(_cpudata())
|
||||
if grains['kernel'] == 'Linux':
|
||||
if os.path.isfile('/etc/arch-release'):
|
||||
grains['os'] = 'Arch'
|
||||
elif os.path.isfile('/etc/debian_version'):
|
||||
grains['os'] = 'Debian'
|
||||
elif os.path.isfile('/etc/gentoo-version'):
|
||||
grains['os'] = 'Gentoo'
|
||||
elif os.path.isfile('/etc/fedora-version'):
|
||||
grains['os'] = 'Fedora'
|
||||
elif os.path.isfile('/etc/mandriva-version'):
|
||||
grains['os'] = 'Mandriva'
|
||||
elif os.path.isfile('/etc/mandrake-version'):
|
||||
grains['os'] = 'Mandrake'
|
||||
elif os.path.isfile('/etc/meego-version'):
|
||||
grains['os'] = 'MeeGo'
|
||||
elif os.path.isfile('/etc/vmware-version'):
|
||||
grains['os'] = 'VMWareESX'
|
||||
elif os.path.isfile('/etc/bluewhite64-version'):
|
||||
grains['os'] = 'Bluewhite64'
|
||||
elif os.path.isfile('/etc/slamd64-version'):
|
||||
grains['os'] = 'Slamd64'
|
||||
elif os.path.isfile('/etc/slackware-version'):
|
||||
grains['os'] = 'Slackware'
|
||||
elif os.path.isfile('/etc/enterprise-release'):
|
||||
if os.path.isfile('/etc/ovs-release'):
|
||||
grains['os'] = 'OVS'
|
||||
else:
|
||||
grains['os'] = 'OEL'
|
||||
elif os.path.isfile('/etc/redhat-release'):
|
||||
data = open('/etc/redhat-release', 'r').read()
|
||||
if data.count('centos'):
|
||||
grains['os'] = 'CentOS'
|
||||
elif data.count('scientific'):
|
||||
grains['os'] = 'Scientific'
|
||||
else:
|
||||
grains['os'] = 'RedHat'
|
||||
elif os.path.isfile('/etc/SuSE-release'):
|
||||
data = open('/etc/SuSE-release', 'r').read()
|
||||
if data.count('SUSE LINUX Enterprise Server'):
|
||||
grains['os'] = 'SLES'
|
||||
elif data.count('SUSE LINUX Enterprise Desktop'):
|
||||
grains['os'] = 'SLED'
|
||||
elif data.count('openSUSE'):
|
||||
grains['os'] = 'openSUSE'
|
||||
else:
|
||||
grains['os'] = 'SUSE'
|
||||
elif grains['kernel'] == 'sunos':
|
||||
grains['os'] = 'Solaris'
|
||||
elif grains['kernel'] == 'VMkernel':
|
||||
grains['os'] = 'ESXi'
|
||||
elif grains['kernel'] == 'Darwin':
|
||||
grains['os'] = 'MacOS'
|
||||
else:
|
||||
grains['os'] = grains['kernel']
|
||||
|
||||
# Load the virtual machine info
|
||||
|
||||
grains.update(_virtual(grains))
|
||||
grains.update(_ps(grains))
|
||||
return grains
|
||||
|
||||
def hostname():
|
||||
'''
|
||||
Return fqdn, hostname, domainname
|
||||
'''
|
||||
# This is going to need some work
|
||||
# Provides:
|
||||
# fqdn
|
||||
# host
|
||||
# domain
|
||||
grains = {}
|
||||
grains['fqdn'] = socket.getfqdn()
|
||||
comps = grains['fqdn'].split('.')
|
||||
grains['host'] = comps[0]
|
||||
if len(comps) > 1:
|
||||
grains['domain'] = '.'.join(comps[1:])
|
||||
else:
|
||||
grains['domain'] = ''
|
||||
return grains
|
||||
|
||||
def path():
|
||||
'''
|
||||
Return the path
|
||||
'''
|
||||
# Provides:
|
||||
# path
|
||||
return {'path': os.environ['PATH'].strip()}
|
||||
|
||||
def memdata():
|
||||
'''
|
||||
Gather information about the system memory
|
||||
'''
|
||||
# Provides:
|
||||
# mem_total
|
||||
grains = {'mem_total': 0}
|
||||
meminfo = '/proc/meminfo'
|
||||
if os.path.isfile(meminfo):
|
||||
for line in open(meminfo, 'r').readlines():
|
||||
comps = line.split(':')
|
||||
if not len(comps) > 1:
|
||||
continue
|
||||
if comps[0].strip() == 'MemTotal':
|
||||
grains['mem_total'] = int(comps[1].split()[0])/1024
|
||||
return grains
|
||||
|
338
debian/salt/usr/share/pyshared/salt/loader.py
vendored
338
debian/salt/usr/share/pyshared/salt/loader.py
vendored
|
@ -1,338 +0,0 @@
|
|||
'''
|
||||
Routines to set up a minion
|
||||
'''
|
||||
# This module still needs package support, so that the functions dict returned
|
||||
# can send back functions like: foo.bar.baz
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import sys
|
||||
import imp
|
||||
import logging
|
||||
import salt
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
salt_base_path = os.path.dirname(salt.__file__)
|
||||
|
||||
def minion_mods(opts):
|
||||
'''
|
||||
Returns the minion modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
if opts.has_key('module_dirs'):
|
||||
extra_dirs = opts['module_dirs']
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'modules'),
|
||||
] + extra_dirs
|
||||
load = Loader(module_dirs, opts)
|
||||
return load.apply_introspection(load.gen_functions())
|
||||
|
||||
def returners(opts):
|
||||
'''
|
||||
Returns the returner modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
if opts.has_key('returner_dirs'):
|
||||
extra_dirs = opts['returner_dirs']
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'returners'),
|
||||
] + extra_dirs
|
||||
load = Loader(module_dirs, opts)
|
||||
return load.filter_func('returner')
|
||||
|
||||
def states(opts, functions):
|
||||
'''
|
||||
Returns the returner modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
if opts.has_key('states_dirs'):
|
||||
extra_dirs = opts['states_dirs']
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'states'),
|
||||
] + extra_dirs
|
||||
load = Loader(module_dirs, opts)
|
||||
pack = {'name': '__salt__',
|
||||
'value': functions}
|
||||
return load.gen_functions(pack)
|
||||
|
||||
def render(opts, functions):
|
||||
'''
|
||||
Returns the render modules
|
||||
'''
|
||||
extra_dirs = []
|
||||
if opts.has_key('render_dirs'):
|
||||
extra_dirs = opts['render_dirs']
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'renderers'),
|
||||
] + extra_dirs
|
||||
load = Loader(module_dirs, opts)
|
||||
pack = {'name': '__salt__',
|
||||
'value': functions}
|
||||
return load.filter_func('render', pack)
|
||||
|
||||
def grains(opts):
|
||||
'''
|
||||
Return the functions for the dynamic grains and the values for the static
|
||||
grains.
|
||||
'''
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'grains'),
|
||||
]
|
||||
load = Loader(module_dirs, opts)
|
||||
grains = load.gen_grains()
|
||||
if opts.has_key('grains'):
|
||||
grains.update(opts['grains'])
|
||||
return grains
|
||||
|
||||
def call(fun, args=[], dirs=[]):
|
||||
'''
|
||||
Directly call a function inside a loader directory
|
||||
'''
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'modules'),
|
||||
] + dirs
|
||||
load = Loader(module_dirs)
|
||||
return load.call(fun, args)
|
||||
|
||||
def runner(opts):
|
||||
'''
|
||||
Directly call a function inside a loader directory
|
||||
'''
|
||||
module_dirs = [
|
||||
os.path.join(salt_base_path, 'runners'),
|
||||
]
|
||||
load = Loader(module_dirs, opts)
|
||||
return load.gen_functions()
|
||||
|
||||
|
||||
class Loader(object):
|
||||
'''
|
||||
Used to load in arbitrary modules from a directory, the Loader can also be
|
||||
used to only load specific functions from a directory, or to call modules
|
||||
in an arbitrary directory directly.
|
||||
'''
|
||||
def __init__(self, module_dirs, opts={}):
|
||||
self.module_dirs = module_dirs
|
||||
if opts.has_key('grains'):
|
||||
self.grains = opts['grains']
|
||||
else:
|
||||
self.grains = {}
|
||||
self.opts = self.__prep_mod_opts(opts)
|
||||
|
||||
def __prep_mod_opts(self, opts):
|
||||
'''
|
||||
Strip out of the opts any logger instance
|
||||
'''
|
||||
mod_opts = {}
|
||||
for key, val in opts.items():
|
||||
if key in ('logger', 'grains'):
|
||||
continue
|
||||
mod_opts[key] = val
|
||||
return mod_opts
|
||||
|
||||
def get_docs(self, funcs, module=''):
|
||||
'''
|
||||
Return a dict containing all of the doc strings in the functions dict
|
||||
'''
|
||||
docs = {}
|
||||
for fun in funcs:
|
||||
if fun.startswith(module):
|
||||
docs[fun] = funcs[fun].__doc__
|
||||
return docs
|
||||
|
||||
def call(self, fun, arg=[]):
|
||||
'''
|
||||
Call a function in the load path.
|
||||
'''
|
||||
name = fun[:fun.rindex('.')]
|
||||
try:
|
||||
fn_, path, desc = imp.find_module(name, self.module_dirs)
|
||||
mod = imp.load_module(name, fn_, path, desc)
|
||||
except ImportError:
|
||||
if self.opts.get('cython_enable', True) is True:
|
||||
# The module was not found, try to find a cython module
|
||||
try:
|
||||
import pyximport
|
||||
pyximport.install()
|
||||
|
||||
for mod_dir in self.module_dirs:
|
||||
for fn_ in os.listdir(mod_dir):
|
||||
if name == fn_[:fn_.rindex('.')]:
|
||||
# Found it, load the mod and break the loop
|
||||
mod = pyximport.load_module(
|
||||
name, os.path.join(mod_dir, fn_)
|
||||
)
|
||||
return getattr(
|
||||
mod, fun[fun.rindex('.') + 1:])(*arg)
|
||||
except ImportError:
|
||||
log.info("Cython is enabled in options though it's not "
|
||||
"present in the system path. Skipping Cython "
|
||||
"modules.")
|
||||
return getattr(mod, fun[fun.rindex('.') + 1:])(*arg)
|
||||
|
||||
def gen_functions(self, pack=None):
|
||||
'''
|
||||
Return a dict of functions found in the defined module_dirs
|
||||
'''
|
||||
names = {}
|
||||
modules = []
|
||||
funcs = {}
|
||||
|
||||
cython_enabled = False
|
||||
if self.opts.get('cython_enable', True) is True:
|
||||
try:
|
||||
import pyximport
|
||||
pyximport.install()
|
||||
cython_enabled = True
|
||||
except ImportError:
|
||||
log.info("Cython is enabled in options though it's not present "
|
||||
"in the system path. Skipping Cython modules.")
|
||||
for mod_dir in self.module_dirs:
|
||||
if not mod_dir.startswith('/'):
|
||||
continue
|
||||
if not os.path.isdir(mod_dir):
|
||||
continue
|
||||
for fn_ in os.listdir(mod_dir):
|
||||
if fn_.startswith('_'):
|
||||
continue
|
||||
if fn_.endswith('.py')\
|
||||
or fn_.endswith('.pyc')\
|
||||
or fn_.endswith('.pyo')\
|
||||
or fn_.endswith('.so')\
|
||||
or (cython_enabled and fn_.endswith('.pyx')):
|
||||
names[fn_[:fn_.rindex('.')]] = os.path.join(mod_dir, fn_)
|
||||
for name in names:
|
||||
try:
|
||||
if names[name].endswith('.pyx'):
|
||||
# If there's a name which ends in .pyx it means the above
|
||||
# cython_enabled is True. Continue...
|
||||
mod = pyximport.load_module(name, names[name], '/tmp')
|
||||
else:
|
||||
fn_, path, desc = imp.find_module(name, self.module_dirs)
|
||||
mod = imp.load_module(name, fn_, path, desc)
|
||||
except ImportError:
|
||||
continue
|
||||
modules.append(mod)
|
||||
for mod in modules:
|
||||
virtual = ''
|
||||
if hasattr(mod, '__opts__'):
|
||||
mod.__opts__.update(self.opts)
|
||||
else:
|
||||
mod.__opts__ = self.opts
|
||||
|
||||
mod.__grains__ = self.grains
|
||||
|
||||
if pack:
|
||||
if type(pack) == type(list()):
|
||||
for chunk in pack:
|
||||
setattr(mod, chunk['name'], chunk['value'])
|
||||
else:
|
||||
setattr(mod, pack['name'], pack['value'])
|
||||
|
||||
if hasattr(mod, '__virtual__'):
|
||||
if callable(mod.__virtual__):
|
||||
virtual = mod.__virtual__()
|
||||
|
||||
for attr in dir(mod):
|
||||
if attr.startswith('_'):
|
||||
continue
|
||||
if callable(getattr(mod, attr)):
|
||||
if virtual:
|
||||
func = getattr(mod, attr)
|
||||
funcs[virtual + '.' + attr] = func
|
||||
self._apply_outputter(func, mod)
|
||||
elif virtual == False:
|
||||
pass
|
||||
else:
|
||||
func = getattr(mod, attr)
|
||||
funcs[mod.__name__ + '.' + attr] = func
|
||||
self._apply_outputter(func, mod)
|
||||
for mod in modules:
|
||||
if not hasattr(mod, '__salt__'):
|
||||
mod.__salt__ = funcs
|
||||
return funcs
|
||||
|
||||
def _apply_outputter(self, func, mod):
|
||||
'''
|
||||
Apply the __outputter__ variable to the functions
|
||||
'''
|
||||
if hasattr(mod, '__outputter__'):
|
||||
outp = mod.__outputter__
|
||||
if outp.has_key(func.__name__):
|
||||
func.__outputter__ = outp[func.__name__]
|
||||
|
||||
def apply_introspection(self, funcs):
|
||||
'''
|
||||
Pass in a function object returned from get_functions to load in
|
||||
introspection functions.
|
||||
'''
|
||||
funcs['sys.list_functions'] = lambda: self.list_funcs(funcs)
|
||||
funcs['sys.list_modules'] = lambda: self.list_modules(funcs)
|
||||
funcs['sys.doc'] = lambda module = '': self.get_docs(funcs, module)
|
||||
return funcs
|
||||
|
||||
def list_funcs(self, funcs):
|
||||
'''
|
||||
List the functions
|
||||
'''
|
||||
return funcs.keys()
|
||||
|
||||
def list_modules(self, funcs):
|
||||
'''
|
||||
List the modules
|
||||
'''
|
||||
modules = set()
|
||||
for key in funcs:
|
||||
comps = key.split('.')
|
||||
if len(comps) < 2:
|
||||
continue
|
||||
modules.add(comps[0])
|
||||
return sorted(list(modules))
|
||||
|
||||
def filter_func(self, name, pack=None):
|
||||
'''
|
||||
Filter a specific function out of the functions, this is used to load
|
||||
the returners for the salt minion
|
||||
'''
|
||||
funcs = {}
|
||||
gen = self.gen_functions(pack) if pack else self.gen_functions()
|
||||
for key, fun in gen.items():
|
||||
if key[key.index('.') + 1:] == name:
|
||||
funcs[key[:key.index('.')]] = fun
|
||||
return funcs
|
||||
|
||||
def chop_mods(self):
|
||||
'''
|
||||
Chop off the module names so that the raw functions are exposed, used
|
||||
to generate the grains
|
||||
'''
|
||||
funcs = {}
|
||||
for key, fun in self.gen_functions().items():
|
||||
funcs[key[key.rindex('.')] + 1:] = fun
|
||||
return funcs
|
||||
|
||||
def gen_grains(self):
|
||||
'''
|
||||
Read the grains directory and execute all of the public callable
|
||||
members. then verify that the returns are python dict's and return a
|
||||
dict containing all of the returned values.
|
||||
'''
|
||||
grains = {}
|
||||
funcs = self.gen_functions()
|
||||
for key, fun in funcs.items():
|
||||
if not key[key.index('.') + 1:] == 'core':
|
||||
continue
|
||||
ret = fun()
|
||||
if not type(ret) == type(dict()):
|
||||
continue
|
||||
grains.update(ret)
|
||||
for key, fun in funcs.items():
|
||||
if key[key.index('.') + 1:] == 'core':
|
||||
continue
|
||||
ret = fun()
|
||||
if not type(ret) == type(dict()):
|
||||
continue
|
||||
grains.update(ret)
|
||||
return grains
|
||||
|
104
debian/salt/usr/share/pyshared/salt/log.py
vendored
104
debian/salt/usr/share/pyshared/salt/log.py
vendored
|
@ -1,104 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
salt.log
|
||||
~~~~~~~~
|
||||
|
||||
This is were Salt's logging get's setup.
|
||||
|
||||
|
||||
:copyright: © 2011 :email:`Pedro Algarvio (pedro@algarvio.me)`
|
||||
:license: Apache 2.0, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import logging.handlers
|
||||
|
||||
TRACE = 5
|
||||
GARBAGE = 1
|
||||
|
||||
LOG_LEVELS = {
|
||||
'none': logging.NOTSET,
|
||||
'info': logging.INFO,
|
||||
'warn': logging.WARNING,
|
||||
'warning': logging.WARNING,
|
||||
'error': logging.ERROR,
|
||||
'none': logging.CRITICAL,
|
||||
'debug': logging.DEBUG,
|
||||
'trace': TRACE,
|
||||
'garbage': GARBAGE
|
||||
}
|
||||
|
||||
LoggingLoggerClass = logging.getLoggerClass()
|
||||
|
||||
class Logging(LoggingLoggerClass):
|
||||
def garbage(self, msg, *args, **kwargs):
|
||||
return LoggingLoggerClass.log(self, 1, msg, *args, **kwargs)
|
||||
|
||||
def trace(self, msg, *args, **kwargs):
|
||||
return LoggingLoggerClass.log(self, 5, msg, *args, **kwargs)
|
||||
|
||||
def getLogger(name):
|
||||
init()
|
||||
return logging.getLogger(name)
|
||||
|
||||
def init():
|
||||
'''
|
||||
Replace the default system logger with a version that includes trace()
|
||||
and garbage() methods.
|
||||
'''
|
||||
if logging.getLoggerClass() is not Logging:
|
||||
logging.setLoggerClass(Logging)
|
||||
logging.addLevelName(5, 'TRACE')
|
||||
logging.addLevelName(1, 'GARBAGE')
|
||||
# Set the root logger at the lowest level possible
|
||||
logging.getLogger().setLevel(1)
|
||||
|
||||
def setup_console_logger(log_level):
|
||||
'''
|
||||
Setup the console logger
|
||||
'''
|
||||
init()
|
||||
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
|
||||
|
||||
rootLogger = logging.getLogger()
|
||||
handler = logging.StreamHandler()
|
||||
|
||||
handler.setLevel(level)
|
||||
formatter = logging.Formatter(
|
||||
'%(asctime)s,%(msecs)03.0f [%(name)-15s][%(levelname)-8s] %(message)s',
|
||||
datefmt="%H:%M:%S"
|
||||
)
|
||||
|
||||
handler.setFormatter(formatter)
|
||||
rootLogger.addHandler(handler)
|
||||
|
||||
|
||||
def setup_logfile_logger(log_path, log_level):
|
||||
'''
|
||||
Setup the logfile logger
|
||||
'''
|
||||
init()
|
||||
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
|
||||
|
||||
rootLogger = logging.getLogger()
|
||||
handler = getattr(
|
||||
logging.handlers, 'WatchedFileHandler', logging.FileHandler)(
|
||||
log_path, 'a', 'utf-8', delay=0
|
||||
)
|
||||
|
||||
handler.setLevel(level)
|
||||
formatter = logging.Formatter(
|
||||
'%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s',
|
||||
)
|
||||
|
||||
handler.setFormatter(formatter)
|
||||
rootLogger.addHandler(handler)
|
||||
|
||||
def set_logger_level(logger_name, log_level):
|
||||
'''
|
||||
Tweak a specific logger's logging level
|
||||
'''
|
||||
init()
|
||||
logging.getLogger(logger_name).setLevel(
|
||||
LOG_LEVELS.get(log_level.lower(), logging.ERROR)
|
||||
)
|
699
debian/salt/usr/share/pyshared/salt/master.py
vendored
699
debian/salt/usr/share/pyshared/salt/master.py
vendored
|
@ -1,699 +0,0 @@
|
|||
'''
|
||||
This module contains all fo the routines needed to set up a master server, this
|
||||
involves preparing the three listeners and the workers needed by the master.
|
||||
'''
|
||||
# Import python modules
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import hashlib
|
||||
import logging
|
||||
import tempfile
|
||||
import multiprocessing
|
||||
import time
|
||||
import datetime
|
||||
import cPickle as pickle
|
||||
# Import zeromq
|
||||
import zmq
|
||||
# Import salt modules
|
||||
import salt.utils
|
||||
import salt.crypt
|
||||
import salt.payload
|
||||
import salt.client
|
||||
# Import cryptography modules
|
||||
from M2Crypto import RSA
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def prep_jid(cachedir, load):
|
||||
'''
|
||||
Parses the job return directory, generates a job id and sets up the
|
||||
job id directory.
|
||||
'''
|
||||
jid_root = os.path.join(cachedir, 'jobs')
|
||||
jid = datetime.datetime.strftime(
|
||||
datetime.datetime.now(), '%Y%m%d%H%M%S%f'
|
||||
)
|
||||
jid_dir = os.path.join(jid_root, jid)
|
||||
if not os.path.isdir(jid_dir):
|
||||
os.makedirs(jid_dir)
|
||||
pickle.dump(load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
else:
|
||||
return prep_jid(load)
|
||||
return jid
|
||||
|
||||
|
||||
class SMaster(object):
|
||||
'''
|
||||
Create a simple salt-master, this will generate the top level master
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
'''
|
||||
Create a salt master server instance
|
||||
'''
|
||||
self.opts = opts
|
||||
self.master_key = salt.crypt.MasterKeys(self.opts)
|
||||
self.key = self.__prep_key()
|
||||
self.crypticle = self.__prep_crypticle()
|
||||
|
||||
def __prep_crypticle(self):
|
||||
'''
|
||||
Return the crypticle used for AES
|
||||
'''
|
||||
return salt.crypt.Crypticle(self.opts['aes'])
|
||||
|
||||
def __prep_key(self):
|
||||
'''
|
||||
A key needs to be placed in the filesystem with permissions 0400 so
|
||||
clients are required to run as root.
|
||||
'''
|
||||
log.info('Preparing the root key for local communication')
|
||||
keyfile = os.path.join(self.opts['cachedir'], '.root_key')
|
||||
key = salt.crypt.Crypticle.generate_key_string()
|
||||
if os.path.isfile(keyfile):
|
||||
return open(keyfile, 'r').read()
|
||||
open(keyfile, 'w+').write(key)
|
||||
os.chmod(keyfile, 256)
|
||||
return key
|
||||
|
||||
|
||||
class Master(SMaster):
|
||||
'''
|
||||
The salt master server
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
'''
|
||||
Create a salt master server instance
|
||||
'''
|
||||
SMaster.__init__(self, opts)
|
||||
|
||||
def _clear_old_jobs(self):
|
||||
'''
|
||||
Clean out the old jobs
|
||||
'''
|
||||
while True:
|
||||
cur = datetime.datetime.strftime(
|
||||
datetime.datetime.now(), '%Y%m%d%H'
|
||||
)
|
||||
if self.opts['keep_jobs'] == 0:
|
||||
return
|
||||
jid_root = os.path.join(self.opts['cachedir'], 'jobs')
|
||||
for jid in os.listdir(jid_root):
|
||||
if int(cur) - int(jid[:10]) > self.opts['keep_jobs']:
|
||||
shutil.rmtree(os.path.join(jid_root, jid))
|
||||
time.sleep(60)
|
||||
|
||||
def start(self):
|
||||
'''
|
||||
Turn on the master server components
|
||||
'''
|
||||
log.info('Starting the Salt Master')
|
||||
multiprocessing.Process(target=self._clear_old_jobs).start()
|
||||
aes_funcs = AESFuncs(self.opts, self.crypticle)
|
||||
clear_funcs = ClearFuncs(
|
||||
self.opts,
|
||||
self.key,
|
||||
self.master_key,
|
||||
self.crypticle)
|
||||
reqserv = ReqServer(
|
||||
self.opts,
|
||||
self.crypticle,
|
||||
self.key,
|
||||
self.master_key,
|
||||
aes_funcs,
|
||||
clear_funcs)
|
||||
reqserv.start_publisher()
|
||||
reqserv.run()
|
||||
|
||||
|
||||
class Publisher(multiprocessing.Process):
|
||||
'''
|
||||
The publishing interface, a simple zeromq publisher that sends out the
|
||||
commands.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
multiprocessing.Process.__init__(self)
|
||||
self.opts = opts
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Bind to the interface specified in the configuration file
|
||||
'''
|
||||
context = zmq.Context(1)
|
||||
pub_sock = context.socket(zmq.PUB)
|
||||
pull_sock = context.socket(zmq.PULL)
|
||||
pub_uri = 'tcp://{0[interface]}:{0[publish_port]}'.format(self.opts)
|
||||
pull_uri = 'ipc://{0}'.format(
|
||||
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
|
||||
)
|
||||
log.info('Starting the Salt Publisher on %s', pub_uri)
|
||||
pub_sock.bind(pub_uri)
|
||||
pull_sock.bind(pull_uri)
|
||||
|
||||
while True:
|
||||
package = pull_sock.recv()
|
||||
log.info('Publishing command')
|
||||
pub_sock.send(package)
|
||||
|
||||
|
||||
class ReqServer(object):
|
||||
'''
|
||||
Starts up the master request server, minions send results to this
|
||||
interface.
|
||||
'''
|
||||
def __init__(self, opts, crypticle, key, mkey, aes_funcs, clear_funcs):
|
||||
self.opts = opts
|
||||
self.aes_funcs = aes_funcs
|
||||
self.clear_funcs = clear_funcs
|
||||
self.master_key = mkey
|
||||
self.context = zmq.Context(self.opts['worker_threads'])
|
||||
# Prepare the zeromq sockets
|
||||
self.uri = 'tcp://%(interface)s:%(ret_port)s' % self.opts
|
||||
self.clients = self.context.socket(zmq.XREP)
|
||||
self.workers = self.context.socket(zmq.XREQ)
|
||||
self.w_uri = 'ipc://{0}'.format(
|
||||
os.path.join(self.opts['sock_dir'], 'workers.ipc')
|
||||
)
|
||||
# Prepare the aes key
|
||||
self.key = key
|
||||
self.crypticle = crypticle
|
||||
|
||||
def __bind(self):
|
||||
'''
|
||||
Binds the reply server
|
||||
'''
|
||||
log.info('Setting up the master communication server')
|
||||
self.clients.bind(self.uri)
|
||||
|
||||
for ind in range(int(self.opts['worker_threads'])):
|
||||
log.info('Starting Salt worker process {0}'.format(ind))
|
||||
MWorker(self.opts,
|
||||
self.master_key,
|
||||
self.key,
|
||||
self.crypticle,
|
||||
self.aes_funcs,
|
||||
self.clear_funcs).start()
|
||||
|
||||
self.workers.bind(self.w_uri)
|
||||
|
||||
zmq.device(zmq.QUEUE, self.clients, self.workers)
|
||||
|
||||
def start_publisher(self):
|
||||
'''
|
||||
Start the salt publisher interface
|
||||
'''
|
||||
# Start the publisher
|
||||
self.publisher = Publisher(self.opts)
|
||||
self.publisher.start()
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Start up the ReqServer
|
||||
'''
|
||||
self.__bind()
|
||||
|
||||
|
||||
class MWorker(multiprocessing.Process):
|
||||
'''
|
||||
The worker multiprocess instance to manage the backend operations for the
|
||||
salt master.
|
||||
'''
|
||||
def __init__(self,
|
||||
opts,
|
||||
mkey,
|
||||
key,
|
||||
crypticle,
|
||||
aes_funcs,
|
||||
clear_funcs):
|
||||
multiprocessing.Process.__init__(self)
|
||||
self.opts = opts
|
||||
self.crypticle = crypticle
|
||||
self.aes_funcs = aes_funcs
|
||||
self.clear_funcs = clear_funcs
|
||||
|
||||
def __bind(self):
|
||||
'''
|
||||
Bind to the local port
|
||||
'''
|
||||
context = zmq.Context(1)
|
||||
socket = context.socket(zmq.REP)
|
||||
w_uri = 'ipc://{0}'.format(
|
||||
os.path.join(self.opts['sock_dir'], 'workers.ipc')
|
||||
)
|
||||
log.info('Worker binding to socket {0}'.format(w_uri))
|
||||
socket.connect(w_uri)
|
||||
|
||||
while True:
|
||||
package = socket.recv()
|
||||
payload = salt.payload.unpackage(package)
|
||||
ret = salt.payload.package(self._handle_payload(payload))
|
||||
socket.send(ret)
|
||||
|
||||
def _handle_payload(self, payload):
|
||||
'''
|
||||
The _handle_payload method is the key method used to figure out what
|
||||
needs to be done with communication to the server
|
||||
'''
|
||||
return {'aes': self._handle_aes,
|
||||
'pub': self._handle_pub,
|
||||
'clear': self._handle_clear}[payload['enc']](payload['load'])
|
||||
|
||||
def _handle_clear(self, load):
|
||||
'''
|
||||
Take care of a cleartext command
|
||||
'''
|
||||
log.info('Clear payload received with command %(cmd)s', load)
|
||||
return getattr(self.clear_funcs, load['cmd'])(load)
|
||||
|
||||
def _handle_pub(self, load):
|
||||
'''
|
||||
Handle a command sent via a public key pair
|
||||
'''
|
||||
log.info('Pubkey payload received with command %(cmd)s', load)
|
||||
|
||||
def _handle_aes(self, load):
|
||||
'''
|
||||
Handle a command sent via an aes key
|
||||
'''
|
||||
data = self.crypticle.loads(load)
|
||||
log.info('AES payload received with command %(cmd)s', data)
|
||||
return self.aes_funcs.run_func(data['cmd'], data)
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Start a Master Worker
|
||||
'''
|
||||
self.__bind()
|
||||
|
||||
|
||||
class AESFuncs(object):
|
||||
'''
|
||||
Set up functions that are available when the load is encrypted with AES
|
||||
'''
|
||||
# The AES Functions:
|
||||
#
|
||||
def __init__(self, opts, crypticle):
|
||||
self.opts = opts
|
||||
self.crypticle = crypticle
|
||||
# Make a client
|
||||
self.local = salt.client.LocalClient(self.opts['conf_file'])
|
||||
|
||||
def __find_file(self, path, env='base'):
|
||||
'''
|
||||
Search the environment for the relative path
|
||||
'''
|
||||
fnd = {'path': '',
|
||||
'rel': ''}
|
||||
if not self.opts['file_roots'].has_key(env):
|
||||
return fnd
|
||||
for root in self.opts['file_roots'][env]:
|
||||
full = os.path.join(root, path)
|
||||
if os.path.isfile(full):
|
||||
fnd['path'] = full
|
||||
fnd['rel'] = path
|
||||
return fnd
|
||||
return fnd
|
||||
|
||||
def __verify_minion(self, id_, token):
|
||||
'''
|
||||
Take a minion id and a string encrypted with the minion private key
|
||||
The string needs to decrypt as 'salt' with the minion public key
|
||||
'''
|
||||
minion_pub = open(
|
||||
os.path.join(
|
||||
self.opts['pki_dir'],
|
||||
'minions',
|
||||
id_
|
||||
),
|
||||
'r'
|
||||
).read()
|
||||
tmp_pub = tempfile.mktemp()
|
||||
open(tmp_pub, 'w+').write(minion_pub)
|
||||
pub = RSA.load_pub_key(tmp_pub)
|
||||
os.remove(tmp_pub)
|
||||
if pub.public_decrypt(token, 5) == 'salt':
|
||||
return True
|
||||
log.error('Salt minion claiming to be {0} has attempted to'
|
||||
'communicate with the master and could not be verified'
|
||||
.format(id_))
|
||||
return False
|
||||
|
||||
def _serve_file(self, load):
|
||||
'''
|
||||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
ret = {'data': '',
|
||||
'dest': ''}
|
||||
if not load.has_key('path')\
|
||||
or not load.has_key('loc')\
|
||||
or not load.has_key('env'):
|
||||
return ret
|
||||
fnd = self.__find_file(load['path'], load['env'])
|
||||
if not fnd['path']:
|
||||
return ret
|
||||
ret['dest'] = fnd['rel']
|
||||
fn_ = open(fnd['path'], 'rb')
|
||||
fn_.seek(load['loc'])
|
||||
ret['data'] = fn_.read(self.opts['file_buffer_size'])
|
||||
return ret
|
||||
|
||||
def _file_hash(self, load):
|
||||
'''
|
||||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if not load.has_key('path')\
|
||||
or not load.has_key('env'):
|
||||
return ''
|
||||
path = self.__find_file(load['path'], load['env'])['path']
|
||||
if not path:
|
||||
return {}
|
||||
ret = {}
|
||||
ret['hsum'] = getattr(hashlib, self.opts['hash_type'])(
|
||||
open(path, 'rb').read()).hexdigest()
|
||||
ret['hash_type'] = self.opts['hash_type']
|
||||
return ret
|
||||
|
||||
def _master_opts(self, load):
|
||||
'''
|
||||
Return the master options to the minion
|
||||
'''
|
||||
return self.opts
|
||||
|
||||
def _return(self, load):
|
||||
'''
|
||||
Handle the return data sent from the minions
|
||||
'''
|
||||
# If the return data is invalid, just ignore it
|
||||
if not load.has_key('return')\
|
||||
or not load.has_key('jid')\
|
||||
or not load.has_key('id'):
|
||||
return False
|
||||
log.info('Got return from %(id)s for job %(jid)s', load)
|
||||
jid_dir = os.path.join(self.opts['cachedir'], 'jobs', load['jid'])
|
||||
if not os.path.isdir(jid_dir):
|
||||
log.error(
|
||||
'An inconsistency occurred, a job was received with a job id '
|
||||
'that is not present on the master: %(jid)s', load
|
||||
)
|
||||
return False
|
||||
hn_dir = os.path.join(jid_dir, load['id'])
|
||||
if not os.path.isdir(hn_dir):
|
||||
os.makedirs(hn_dir)
|
||||
pickle.dump(load['return'],
|
||||
open(os.path.join(hn_dir, 'return.p'), 'w+'))
|
||||
if load.has_key('out'):
|
||||
pickle.dump(load['out'],
|
||||
open(os.path.join(hn_dir, 'out.p'), 'w+'))
|
||||
|
||||
def _syndic_return(self, load):
|
||||
'''
|
||||
Recieve a syndic minion return and format it to look like returns from
|
||||
individual minions.
|
||||
'''
|
||||
# Verify the load
|
||||
if not load.has_key('return') \
|
||||
or not load.has_key('jid'):
|
||||
return None
|
||||
# Format individual return loads
|
||||
for key, item in load['return'].items():
|
||||
ret = {'jid': load['jid'],
|
||||
'id': key,
|
||||
'return': item}
|
||||
self._return(ret)
|
||||
|
||||
def minion_publish(self, clear_load):
|
||||
'''
|
||||
Publish a command initiated from a minion, this method executes minion
|
||||
restrictions so that the minion publication will only work if it is
|
||||
enabled in the config.
|
||||
The configuration on the master allows minions to be matched to
|
||||
salt functions, so the minions can only publish allowed salt functions
|
||||
The config will look like this:
|
||||
peer:
|
||||
.*:
|
||||
- .*
|
||||
This configuration will enable all minions to execute all commands.
|
||||
peer:
|
||||
foo.example.com:
|
||||
- test.*
|
||||
This configuration will only allow the minion foo.example.com to
|
||||
execute commands from the test module
|
||||
'''
|
||||
# Verify that the load is valid
|
||||
if not self.opts.has_key('peer'):
|
||||
return {}
|
||||
if not isinstance(self.opts['peer'], dict):
|
||||
return {}
|
||||
if not clear_load.has_key('fun')\
|
||||
or not clear_load.has_key('arg')\
|
||||
or not clear_load.has_key('tgt')\
|
||||
or not clear_load.has_key('ret')\
|
||||
or not clear_load.has_key('tok')\
|
||||
or not clear_load.has_key('id'):
|
||||
return {}
|
||||
# If the command will make a recursive publish don't run
|
||||
if re.match('publish.*', clear_load['fun']):
|
||||
return {}
|
||||
# Check the permisions for this minion
|
||||
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
|
||||
# The minion is not who it says it is!
|
||||
# We don't want to listen to it!
|
||||
return {}
|
||||
perms = set()
|
||||
for match in self.opts['peer']:
|
||||
if re.match(match, clear_load['id']):
|
||||
# This is the list of funcs/modules!
|
||||
if isinstance(self.opts['peer'][match], list):
|
||||
perms.update(self.opts['peer'][match])
|
||||
good = False
|
||||
for perm in perms:
|
||||
if re.match(perm, clear_load['fun']):
|
||||
good = True
|
||||
if not good:
|
||||
return {}
|
||||
# Set up the publication payload
|
||||
jid_dir = os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'jobs',
|
||||
clear_load['jid'])
|
||||
pickle.dump(clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
payload = {'enc': 'aes'}
|
||||
load = {
|
||||
'fun': clear_load['fun'],
|
||||
'arg': clear_load['arg'],
|
||||
'tgt': clear_load['tgt'],
|
||||
'jid': clear_load['jid'],
|
||||
'ret': clear_load['ret'],
|
||||
}
|
||||
expr_form = 'glob'
|
||||
timeout = 0
|
||||
if clear_load.has_key('tgt_type'):
|
||||
load['tgt_type'] = clear_load['tgt_type']
|
||||
expr_form = load['tgt_type']
|
||||
if clear_load.has_key('timeout'):
|
||||
timeout = clear_load('timeout')
|
||||
# Encrypt!
|
||||
payload['load'] = self.crypticle.dumps(load)
|
||||
# Connect to the publisher
|
||||
context = zmq.Context(1)
|
||||
pub_sock = context.socket(zmq.PUSH)
|
||||
pull_uri = 'ipc://{0}'.format(
|
||||
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
|
||||
)
|
||||
pub_sock.connect(pull_uri)
|
||||
pub_sock.send(salt.payload.package(payload))
|
||||
# Run the client get_returns method
|
||||
return self.local.get_returns(
|
||||
clear_load['jid'],
|
||||
self.local.check_minions(
|
||||
clear_load['tgt'],
|
||||
expr_form
|
||||
),
|
||||
timeout
|
||||
)
|
||||
|
||||
def run_func(self, func, load):
|
||||
'''
|
||||
Wrapper for running functions executed with AES encryption
|
||||
'''
|
||||
# Don't honor private functions
|
||||
if func.startswith('__'):
|
||||
return self.crypticle.dumps({})
|
||||
# Run the func
|
||||
ret = getattr(self, func)(load)
|
||||
# Don't encrypt the return value for the _return func
|
||||
# (we don't care about the return value, so why encrypt it?)
|
||||
if func == '_return':
|
||||
return ret
|
||||
# AES Encrypt the return
|
||||
return self.crypticle.dumps(ret)
|
||||
|
||||
class ClearFuncs(object):
|
||||
'''
|
||||
Set up functions that are safe to execute when commands sent to the master
|
||||
without encryption and authentication
|
||||
'''
|
||||
# The ClearFuncs object encasulates the functions that can be executed in
|
||||
# the clear:
|
||||
# publish (The publish from the LocalClient)
|
||||
# _auth
|
||||
def __init__(self, opts, key, master_key, crypticle):
|
||||
self.opts = opts
|
||||
self.key = key
|
||||
self.master_key = master_key
|
||||
self.crypticle = crypticle
|
||||
# Make a client
|
||||
self.local = salt.client.LocalClient(self.opts['conf_file'])
|
||||
|
||||
def _send_cluster(self):
|
||||
'''
|
||||
Send the cluster data out
|
||||
'''
|
||||
log.debug('Sending out cluster data')
|
||||
ret = self.local.cmd(self.opts['cluster_masters'],
|
||||
'cluster.distrib',
|
||||
self._cluster_load(),
|
||||
0,
|
||||
'list'
|
||||
)
|
||||
log.debug('Cluster distributed: %s', ret)
|
||||
|
||||
def _cluster_load(self):
|
||||
'''
|
||||
Generates the data sent to the cluster nodes.
|
||||
'''
|
||||
minions = {}
|
||||
master_pem = ''
|
||||
master_conf = open(self.opts['conf_file'], 'r').read()
|
||||
minion_dir = os.path.join(self.opts['pki_dir'], 'minions')
|
||||
for host in os.listdir(minion_dir):
|
||||
pub = os.path.join(minion_dir, host)
|
||||
minions[host] = open(pub, 'r').read()
|
||||
if self.opts['cluster_mode'] == 'full':
|
||||
master_pem = open(os.path.join(self.opts['pki_dir'],
|
||||
'master.pem')).read()
|
||||
return [minions,
|
||||
master_conf,
|
||||
master_pem,
|
||||
self.opts['conf_file']]
|
||||
|
||||
def _auth(self, load):
|
||||
'''
|
||||
Authenticate the client, use the sent public key to encrypt the aes key
|
||||
which was generated at start up
|
||||
'''
|
||||
# 1. Verify that the key we are receiving matches the stored key
|
||||
# 2. Store the key if it is not there
|
||||
# 3. make an rsa key with the pub key
|
||||
# 4. encrypt the aes key as an encrypted pickle
|
||||
# 5. package the return and return it
|
||||
log.info('Authentication request from %(id)s', load)
|
||||
pubfn = os.path.join(self.opts['pki_dir'],
|
||||
'minions',
|
||||
load['id'])
|
||||
pubfn_pend = os.path.join(self.opts['pki_dir'],
|
||||
'minions_pre',
|
||||
load['id'])
|
||||
if self.opts['open_mode']:
|
||||
# open mode is turned on, nuts to checks and overwrite whatever
|
||||
# is there
|
||||
pass
|
||||
elif os.path.isfile(pubfn):
|
||||
# The key has been accepted check it
|
||||
if not open(pubfn, 'r').read() == load['pub']:
|
||||
log.error(
|
||||
'Authentication attempt from %(id)s failed, the public '
|
||||
'keys did not match. This may be an attempt to compromise '
|
||||
'the Salt cluster.', load
|
||||
)
|
||||
ret = {'enc': 'clear',
|
||||
'load': {'ret': False}}
|
||||
return ret
|
||||
elif not os.path.isfile(pubfn_pend)\
|
||||
and not self.opts['auto_accept']:
|
||||
# This is a new key, stick it in pre
|
||||
log.info('New public key placed in pending for %(id)s', load)
|
||||
open(pubfn_pend, 'w+').write(load['pub'])
|
||||
ret = {'enc': 'clear',
|
||||
'load': {'ret': True}}
|
||||
return ret
|
||||
elif os.path.isfile(pubfn_pend)\
|
||||
and not self.opts['auto_accept']:
|
||||
# This key is in pending, if it is the same key ret True, else
|
||||
# ret False
|
||||
if not open(pubfn_pend, 'r').read() == load['pub']:
|
||||
log.error(
|
||||
'Authentication attempt from %(id)s failed, the public '
|
||||
'keys in pending did not match. This may be an attempt to '
|
||||
'compromise the Salt cluster.', load
|
||||
)
|
||||
return {'enc': 'clear',
|
||||
'load': {'ret': False}}
|
||||
else:
|
||||
log.info(
|
||||
'Authentication failed from host %(id)s, the key is in '
|
||||
'pending and needs to be accepted with saltkey -a %(id)s',
|
||||
load
|
||||
)
|
||||
return {'enc': 'clear',
|
||||
'load': {'ret': True}}
|
||||
elif not os.path.isfile(pubfn_pend)\
|
||||
and self.opts['auto_accept']:
|
||||
# This is a new key and auto_accept is turned on
|
||||
pass
|
||||
else:
|
||||
# Something happened that I have not accounted for, FAIL!
|
||||
return {'enc': 'clear',
|
||||
'load': {'ret': False}}
|
||||
|
||||
log.info('Authentication accepted from %(id)s', load)
|
||||
open(pubfn, 'w+').write(load['pub'])
|
||||
key = RSA.load_pub_key(pubfn)
|
||||
ret = {'enc': 'pub',
|
||||
'pub_key': self.master_key.pub_str,
|
||||
'token': self.master_key.token,
|
||||
'publish_port': self.opts['publish_port'],
|
||||
}
|
||||
ret['aes'] = key.public_encrypt(self.opts['aes'], 4)
|
||||
if self.opts['cluster_masters']:
|
||||
self._send_cluster()
|
||||
return ret
|
||||
|
||||
def publish(self, clear_load):
|
||||
'''
|
||||
This method sends out publications to the minions, it can only be used
|
||||
by the LocalClient.
|
||||
'''
|
||||
# Verify that the caller has root on master
|
||||
if not clear_load.pop('key') == self.key:
|
||||
return ''
|
||||
jid_dir = os.path.join(self.opts['cachedir'], 'jobs', clear_load['jid'])
|
||||
# Verify the jid dir
|
||||
if not os.path.isdir(jid_dir):
|
||||
os.makedirs(jid_dir)
|
||||
# Save the invocation information
|
||||
pickle.dump(clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
|
||||
# Set up the payload
|
||||
payload = {'enc': 'aes'}
|
||||
load = {
|
||||
'fun': clear_load['fun'],
|
||||
'arg': clear_load['arg'],
|
||||
'tgt': clear_load['tgt'],
|
||||
'jid': clear_load['jid'],
|
||||
'ret': clear_load['ret'],
|
||||
}
|
||||
if clear_load.has_key('tgt_type'):
|
||||
load['tgt_type'] = clear_load['tgt_type']
|
||||
if clear_load.has_key('to'):
|
||||
load['to'] = clear_load['to']
|
||||
payload['load'] = self.crypticle.dumps(load)
|
||||
# Send 0MQ to the publisher
|
||||
context = zmq.Context(1)
|
||||
pub_sock = context.socket(zmq.PUSH)
|
||||
pull_uri = 'ipc://{0}'.format(
|
||||
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
|
||||
)
|
||||
pub_sock.connect(pull_uri)
|
||||
pub_sock.send(salt.payload.package(payload))
|
||||
return {'enc': 'clear',
|
||||
'load': {'jid': clear_load['jid']}}
|
577
debian/salt/usr/share/pyshared/salt/minion.py
vendored
577
debian/salt/usr/share/pyshared/salt/minion.py
vendored
|
@ -1,577 +0,0 @@
|
|||
'''
|
||||
Routines to set up a minion
|
||||
'''
|
||||
# Import python libs
|
||||
import os
|
||||
import distutils.sysconfig
|
||||
import glob
|
||||
import re
|
||||
import time
|
||||
import logging
|
||||
import tempfile
|
||||
import traceback
|
||||
import shutil
|
||||
import threading
|
||||
import multiprocessing
|
||||
|
||||
# Import zeromq libs
|
||||
import zmq
|
||||
# Import salt libs
|
||||
import salt.crypt
|
||||
from salt.crypt import AuthenticationError
|
||||
import salt.utils
|
||||
import salt.modules
|
||||
import salt.returners
|
||||
import salt.loader
|
||||
import salt.client
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# To set up a minion:
|
||||
# 1, Read in the configuration
|
||||
# 2. Generate the function mapping dict
|
||||
# 3. Authenticate with the master
|
||||
# 4. Store the aes key
|
||||
# 5. connect to the publisher
|
||||
# 6. handle publications
|
||||
|
||||
class MinionError(Exception): pass
|
||||
|
||||
|
||||
class SMinion(object):
|
||||
'''
|
||||
Create an object that has loaded all of the minion module functions,
|
||||
grains, modules, returners etc.
|
||||
The SMinion allows developers to generate all of the salt minion functions
|
||||
and present them with these functions for general use.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
# Generate all of the minion side components
|
||||
self.opts = opts
|
||||
self.functions = salt.loader.minion_mods(self.opts)
|
||||
self.returners = salt.loader.returners(self.opts)
|
||||
self.states = salt.loader.states(self.opts, self.functions)
|
||||
self.rend = salt.loader.render(self.opts, self.functions)
|
||||
self.matcher = Matcher(self.opts, self.functions)
|
||||
|
||||
|
||||
class Minion(object):
|
||||
'''
|
||||
This class instantiates a minion, runs connections for a minion, and loads
|
||||
all of the functions into the minion
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
'''
|
||||
Pass in the options dict
|
||||
'''
|
||||
self.opts = opts
|
||||
self.mod_opts = self.__prep_mod_opts()
|
||||
self.functions, self.returners = self.__load_modules()
|
||||
self.matcher = Matcher(self.opts, self.functions)
|
||||
self.authenticate()
|
||||
|
||||
def __prep_mod_opts(self):
|
||||
'''
|
||||
Returns a deep copy of the opts with key bits stripped out
|
||||
'''
|
||||
mod_opts = {}
|
||||
for key, val in self.opts.items():
|
||||
if key == 'logger':
|
||||
continue
|
||||
mod_opts[key] = val
|
||||
return mod_opts
|
||||
|
||||
def __load_modules(self):
|
||||
'''
|
||||
Return the functions and the returners loaded up from the loader module
|
||||
'''
|
||||
functions = salt.loader.minion_mods(self.opts)
|
||||
returners = salt.loader.returners(self.opts)
|
||||
return functions, returners
|
||||
|
||||
def _handle_payload(self, payload):
|
||||
'''
|
||||
Takes a payload from the master publisher and does whatever the
|
||||
master wants done.
|
||||
'''
|
||||
{'aes': self._handle_aes,
|
||||
'pub': self._handle_pub,
|
||||
'clear': self._handle_clear}[payload['enc']](payload['load'])
|
||||
|
||||
def _handle_aes(self, load):
|
||||
'''
|
||||
Takes the aes encrypted load, decrypts is and runs the encapsulated
|
||||
instructions
|
||||
'''
|
||||
data = None
|
||||
try:
|
||||
data = self.crypticle.loads(load)
|
||||
except AuthenticationError:
|
||||
self.authenticate()
|
||||
data = self.crypticle.loads(load)
|
||||
# Verify that the publication is valid
|
||||
if not data.has_key('tgt')\
|
||||
or not data.has_key('jid')\
|
||||
or not data.has_key('fun')\
|
||||
or not data.has_key('arg'):
|
||||
return
|
||||
# Verify that the publication applies to this minion
|
||||
if data.has_key('tgt_type'):
|
||||
if not getattr(self.matcher, data['tgt_type'] + '_match')(data['tgt']):
|
||||
return
|
||||
else:
|
||||
if not self.matcher.glob_match(data['tgt']):
|
||||
return
|
||||
# If the minion does not have the function, don't execute, this prevents
|
||||
# minions that could not load a minion module from returning a
|
||||
# predictable exception
|
||||
#if not self.functions.has_key(data['fun']):
|
||||
# return
|
||||
log.debug('Executing command {0[fun]} with jid {0[jid]}'.format(data))
|
||||
self._handle_decoded_payload(data)
|
||||
|
||||
def _handle_pub(self, load):
|
||||
'''
|
||||
Handle public key payloads
|
||||
'''
|
||||
pass
|
||||
|
||||
def _handle_clear(self, load):
|
||||
'''
|
||||
Handle un-encrypted transmissions
|
||||
'''
|
||||
pass
|
||||
|
||||
def _handle_decoded_payload(self, data):
|
||||
'''
|
||||
Override this method if you wish to handle the decoded data differently.
|
||||
'''
|
||||
if self.opts['multiprocessing']:
|
||||
if type(data['fun']) == type(list()):
|
||||
multiprocessing.Process(
|
||||
target=lambda: self._thread_multi_return(data)
|
||||
).start()
|
||||
else:
|
||||
multiprocessing.Process(
|
||||
target=lambda: self._thread_return(data)
|
||||
).start()
|
||||
else:
|
||||
if type(data['fun']) == type(list()):
|
||||
threading.Thread(
|
||||
target=lambda: self._thread_multi_return(data)
|
||||
).start()
|
||||
else:
|
||||
threading.Thread(
|
||||
target=lambda: self._thread_return(data)
|
||||
).start()
|
||||
|
||||
def _thread_return(self, data):
|
||||
'''
|
||||
This method should be used as a threading target, start the actual
|
||||
minion side execution.
|
||||
'''
|
||||
ret = {}
|
||||
for ind in range(0, len(data['arg'])):
|
||||
try:
|
||||
data['arg'][ind] = eval(data['arg'][ind])
|
||||
except:
|
||||
pass
|
||||
|
||||
function_name = data['fun']
|
||||
if function_name in self.functions:
|
||||
try:
|
||||
ret['return'] = self.functions[data['fun']](*data['arg'])
|
||||
except Exception as exc:
|
||||
trb = traceback.format_exc()
|
||||
log.warning('The minion function caused an exception: %s', exc)
|
||||
ret['return'] = trb
|
||||
else:
|
||||
ret['return'] = '"%s" is not available.' % function_name
|
||||
|
||||
ret['jid'] = data['jid']
|
||||
ret['fun'] = data['fun']
|
||||
if data['ret']:
|
||||
ret['id'] = self.opts['id']
|
||||
try:
|
||||
self.returners[data['ret']](ret)
|
||||
except Exception as exc:
|
||||
log.error('The return failed for job %s %s', data['jid'], exc)
|
||||
else:
|
||||
self._return_pub(ret)
|
||||
|
||||
def _thread_multi_return(self, data):
|
||||
'''
|
||||
This method should be used as a threading target, start the actual
|
||||
minion side execution.
|
||||
'''
|
||||
ret = {'return': {}}
|
||||
for ind in range(0, len(data['fun'])):
|
||||
for index in range(0, len(data['arg'][ind])):
|
||||
try:
|
||||
data['arg'][ind][index] = eval(data['arg'][ind][index])
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
ret['return'][data['fun'][ind]]\
|
||||
= self.functions[data['fun'][ind]](*data['arg'][ind])
|
||||
except Exception as exc:
|
||||
trb = traceback.format_exc()
|
||||
log.warning('The minion function caused an exception: %s', exc)
|
||||
ret['return'][data['fun'][ind]] = trb
|
||||
ret['jid'] = data['jid']
|
||||
if data['ret']:
|
||||
ret['id'] = self.opts['id']
|
||||
try:
|
||||
self.returners[data['ret']](ret)
|
||||
except Exception as exc:
|
||||
log.error('The return failed for job %s %s', data['jid'], exc)
|
||||
else:
|
||||
self._return_pub(ret)
|
||||
|
||||
def _return_pub(self, ret, ret_cmd='_return'):
|
||||
'''
|
||||
Return the data from the executed command to the master server
|
||||
'''
|
||||
log.info('Returning information for job: %(jid)s', ret)
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.REQ)
|
||||
socket.connect(self.opts['master_uri'])
|
||||
payload = {'enc': 'aes'}
|
||||
if ret_cmd == '_syndic_return':
|
||||
load = {'cmd': ret_cmd,
|
||||
'jid': ret['jid']}
|
||||
load['return'] = {}
|
||||
for key, value in ret.items():
|
||||
if key == 'jid' or key == 'fun':
|
||||
continue
|
||||
load['return'][key] = value
|
||||
else:
|
||||
load = {'return': ret['return'],
|
||||
'cmd': ret_cmd,
|
||||
'jid': ret['jid'],
|
||||
'id': self.opts['id']}
|
||||
if hasattr(self.functions[ret['fun']], '__outputter__'):
|
||||
oput = self.functions[ret['fun']].__outputter__
|
||||
if isinstance(oput, str):
|
||||
load['out'] = oput
|
||||
payload['load'] = self.crypticle.dumps(load)
|
||||
socket.send_pyobj(payload)
|
||||
return socket.recv()
|
||||
|
||||
def reload_functions(self):
|
||||
'''
|
||||
Reload the functions dict for this minion, reading in any new functions
|
||||
'''
|
||||
self.functions = self.__load_functions()
|
||||
log.debug('Refreshed functions, loaded functions: %s', self.functions)
|
||||
return True
|
||||
|
||||
def authenticate(self):
|
||||
'''
|
||||
Authenticate with the master, this method breaks the functional
|
||||
paradigm, it will update the master information from a fresh sign in,
|
||||
signing in can occur as often as needed to keep up with the revolving
|
||||
master aes key.
|
||||
'''
|
||||
log.debug('Attempting to authenticate with the Salt Master')
|
||||
auth = salt.crypt.Auth(self.opts)
|
||||
while True:
|
||||
creds = auth.sign_in()
|
||||
if creds != 'retry':
|
||||
log.info('Authentication with master successful!')
|
||||
break
|
||||
log.info('Waiting for minion key to be accepted by the master.')
|
||||
time.sleep(10)
|
||||
self.aes = creds['aes']
|
||||
self.publish_port = creds['publish_port']
|
||||
self.crypticle = salt.crypt.Crypticle(self.aes)
|
||||
|
||||
def tune_in(self):
|
||||
'''
|
||||
Lock onto the publisher. This is the main event loop for the minion
|
||||
'''
|
||||
master_pub = 'tcp://' + self.opts['master_ip'] + ':'\
|
||||
+ str(self.publish_port)
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.SUB)
|
||||
socket.connect(master_pub)
|
||||
socket.setsockopt(zmq.SUBSCRIBE, '')
|
||||
while True:
|
||||
payload = socket.recv_pyobj()
|
||||
self._handle_payload(payload)
|
||||
|
||||
|
||||
class Syndic(salt.client.LocalClient, Minion):
|
||||
'''
|
||||
Make a Syndic minion, this minion will use the minion keys on the master to
|
||||
authenticate with a higher level master.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
salt.client.LocalClient.__init__(self, opts['_master_conf_file'])
|
||||
Minion.__init__(self, opts)
|
||||
|
||||
def _handle_aes(self, load):
|
||||
'''
|
||||
Takes the aes encrypted load, decrypts is and runs the encapsulated
|
||||
instructions
|
||||
'''
|
||||
data = None
|
||||
# If the AES authentication has changed, re-authenticate
|
||||
try:
|
||||
data = self.crypticle.loads(load)
|
||||
except AuthenticationError:
|
||||
self.authenticate()
|
||||
data = self.crypticle.loads(load)
|
||||
# Verify that the publication is valid
|
||||
if not data.has_key('tgt')\
|
||||
or not data.has_key('jid')\
|
||||
or not data.has_key('fun')\
|
||||
or not data.has_key('to')\
|
||||
or not data.has_key('arg'):
|
||||
return
|
||||
data['to'] = int(data['to']) - 1
|
||||
log.debug('Executing syndic command {0[fun]} with jid {0[jid]}'.format(data))
|
||||
self._handle_decoded_payload(data)
|
||||
|
||||
def _handle_decoded_payload(self, data):
|
||||
'''
|
||||
Override this method if you wish to handle the decoded data differently.
|
||||
'''
|
||||
if self.opts['multiprocessing']:
|
||||
multiprocessing.Process(
|
||||
target=lambda: self.syndic_cmd(data)
|
||||
).start()
|
||||
else:
|
||||
threading.Thread(
|
||||
target=lambda: self.syndic_cmd(data)
|
||||
).start()
|
||||
|
||||
def syndic_cmd(self, data):
|
||||
'''
|
||||
Take the now clear load and forward it on to the client cmd
|
||||
'''
|
||||
#{'tgt_type': 'glob', 'jid': '20110817205225753516', 'tgt': '*', 'ret': '', 'to': 4, 'arg': [], 'fun': 'test.ping'}
|
||||
# Set up default expr_form
|
||||
if not data.has_key('expr_form'):
|
||||
data['expr_form'] = 'glob'
|
||||
# Send out the publication
|
||||
pub_data = self.pub(
|
||||
data['tgt'],
|
||||
data['fun'],
|
||||
data['arg'],
|
||||
data['expr_form'],
|
||||
data['ret'],
|
||||
data['jid'],
|
||||
data['to']
|
||||
)
|
||||
# Gather the return data
|
||||
ret = self.get_returns(
|
||||
pub_data['jid'],
|
||||
pub_data['minions'],
|
||||
data['to']
|
||||
)
|
||||
ret['jid'] = data['jid']
|
||||
ret['fun'] = data['fun']
|
||||
# Return the publication data up the pipe
|
||||
self._return_pub(ret, '_syndic_return')
|
||||
|
||||
class Matcher(object):
|
||||
'''
|
||||
Use to return the value for matching calls from the master
|
||||
'''
|
||||
def __init__(self, opts, functions=None):
|
||||
self.opts = opts
|
||||
if not functions:
|
||||
functions = salt.loader.minion_mods(self.opts)
|
||||
else:
|
||||
self.functions = functions
|
||||
|
||||
def confirm_top(self, match, data):
|
||||
'''
|
||||
Takes the data passed to a top file environment and determines if the
|
||||
data matches this minion
|
||||
'''
|
||||
matcher = 'glob'
|
||||
for item in data:
|
||||
if type(item) == type(dict()):
|
||||
if item.has_key('match'):
|
||||
matcher = item['match']
|
||||
if hasattr(self, matcher + '_match'):
|
||||
return getattr(self, matcher + '_match')(match)
|
||||
else:
|
||||
log.error('Attempting to match with unknown matcher: %s', matcher)
|
||||
return False
|
||||
|
||||
def glob_match(self, tgt):
|
||||
'''
|
||||
Returns true if the passed glob matches the id
|
||||
'''
|
||||
tmp_dir = tempfile.mkdtemp()
|
||||
cwd = os.getcwd()
|
||||
os.chdir(tmp_dir)
|
||||
open(self.opts['id'], 'w+').write('salt')
|
||||
ret = bool(glob.glob(tgt))
|
||||
os.chdir(cwd)
|
||||
shutil.rmtree(tmp_dir)
|
||||
return ret
|
||||
|
||||
def pcre_match(self, tgt):
|
||||
'''
|
||||
Returns true if the passed pcre regex matches
|
||||
'''
|
||||
return bool(re.match(tgt, self.opts['id']))
|
||||
|
||||
def list_match(self, tgt):
|
||||
'''
|
||||
Determines if this host is on the list
|
||||
'''
|
||||
return bool(tgt.count(self.opts['id']))
|
||||
|
||||
def grain_match(self, tgt):
|
||||
'''
|
||||
Reads in the grains regular expression match
|
||||
'''
|
||||
comps = tgt.split(':')
|
||||
if len(comps) < 2:
|
||||
log.error('Got insufficient arguments for grains from master')
|
||||
return False
|
||||
if not self.opts['grains'].has_key(comps[0]):
|
||||
log.error('Got unknown grain from master: %s', comps[0])
|
||||
return False
|
||||
return bool(re.match(comps[1], self.opts['grains'][comps[0]]))
|
||||
|
||||
def exsel_match(self, tgt):
|
||||
'''
|
||||
Runs a function and return the exit code
|
||||
'''
|
||||
if not self.functions.has_key(tgt):
|
||||
return False
|
||||
return(self.functions[tgt]())
|
||||
|
||||
|
||||
class FileClient(object):
|
||||
'''
|
||||
Interact with the salt master file server.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self.opts = opts
|
||||
self.auth = salt.crypt.SAuth(opts)
|
||||
self.socket = self.__get_socket()
|
||||
|
||||
def __get_socket(self):
|
||||
'''
|
||||
Return the ZeroMQ socket to use
|
||||
'''
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.REQ)
|
||||
socket.connect(self.opts['master_uri'])
|
||||
return socket
|
||||
|
||||
def _check_proto(self, path):
|
||||
'''
|
||||
Make sure that this path is intended for the salt master and trim it
|
||||
'''
|
||||
if not path.startswith('salt://'):
|
||||
raise MinionError('Unsupported path')
|
||||
return path[7:]
|
||||
|
||||
def get_file(self, path, dest='', makedirs=False, env='base'):
|
||||
'''
|
||||
Get a single file from the salt-master
|
||||
'''
|
||||
path = self._check_proto(path)
|
||||
payload = {'enc': 'aes'}
|
||||
fn_ = None
|
||||
if dest:
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
if makedirs:
|
||||
os.makedirs(destdir)
|
||||
else:
|
||||
return False
|
||||
fn_ = open(dest, 'w+')
|
||||
load = {'path': path,
|
||||
'env': env,
|
||||
'cmd': '_serve_file'}
|
||||
while True:
|
||||
if not fn_:
|
||||
load['loc'] = 0
|
||||
else:
|
||||
load['loc'] = fn_.tell()
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
data = self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
if not data['data']:
|
||||
break
|
||||
if not fn_:
|
||||
dest = os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'files',
|
||||
data['dest']
|
||||
)
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
os.makedirs(destdir)
|
||||
fn_ = open(dest, 'w+')
|
||||
fn_.write(data['data'])
|
||||
return dest
|
||||
|
||||
def cache_file(self, path, env='base'):
|
||||
'''
|
||||
Pull a file down from the file server and store it in the minion file
|
||||
cache
|
||||
'''
|
||||
return self.get_file(path, '', True, env)
|
||||
|
||||
def cache_files(self, paths, env='base'):
|
||||
'''
|
||||
Download a list of files stored on the master and put them in the minion
|
||||
file cache
|
||||
'''
|
||||
ret = []
|
||||
for path in paths:
|
||||
ret.append(self.cache_file(path, env))
|
||||
return ret
|
||||
|
||||
def hash_file(self, path, env='base'):
|
||||
'''
|
||||
Return the hash of a file, to get the hash of a file on the
|
||||
salt master file server prepend the path with salt://<file on server>
|
||||
otherwise, prepend the file with / for a local file.
|
||||
'''
|
||||
path = self._check_proto(path)
|
||||
payload = {'enc': 'aes'}
|
||||
load = {'path': path,
|
||||
'env': env,
|
||||
'cmd': '_file_hash'}
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
return self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
|
||||
def get_state(self, sls, env):
|
||||
'''
|
||||
Get a state file from the master and store it in the local minion cache
|
||||
return the location of the file
|
||||
'''
|
||||
if sls.count('.'):
|
||||
sls = sls.replace('.', '/')
|
||||
for path in [
|
||||
'salt://' + sls + '.sls',
|
||||
os.path.join('salt://', sls, 'init.sls')
|
||||
]:
|
||||
dest = self.cache_file(path, env)
|
||||
if dest:
|
||||
return dest
|
||||
return False
|
||||
|
||||
def master_opts(self):
|
||||
'''
|
||||
Return the master opts data
|
||||
'''
|
||||
payload = {'enc': 'aes'}
|
||||
load = {'cmd': '_master_opts'}
|
||||
payload['load'] = self.auth.crypticle.dumps(load)
|
||||
self.socket.send_pyobj(payload)
|
||||
return self.auth.crypticle.loads(self.socket.recv_pyobj())
|
||||
|
|
@ -1,167 +0,0 @@
|
|||
'''
|
||||
Support for Apache
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
|
||||
def __detect_os():
|
||||
'''
|
||||
Apache commands and paths differ depending on packaging
|
||||
'''
|
||||
httpd = 'CentOS Scientific RedHat Fedora'
|
||||
apache2 = 'Ubuntu'
|
||||
if httpd.count(__grains__['os']):
|
||||
return 'apachectl'
|
||||
elif apache2.count(__grains__['os']):
|
||||
return 'apache2ctl'
|
||||
else:
|
||||
return 'apachectl'
|
||||
|
||||
def version():
|
||||
'''
|
||||
Return server version from apachectl -v
|
||||
|
||||
CLI Example:
|
||||
salt '*' apache.version
|
||||
'''
|
||||
cmd = __detect_os() + ' -v'
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
ret = out[0].split(': ')
|
||||
return ret[1]
|
||||
|
||||
def fullversion():
|
||||
'''
|
||||
Return server version from apachectl -V
|
||||
|
||||
CLI Example:
|
||||
salt '*' apache.fullversion
|
||||
'''
|
||||
cmd = __detect_os() + ' -V'
|
||||
ret = {}
|
||||
ret['compiled_with'] = []
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
if ': ' in line:
|
||||
comps = line.split(': ')
|
||||
ret[comps[0].strip().lower().replace(' ', '_')] = comps[1].strip()
|
||||
elif ' -D' in line:
|
||||
cw = line.strip(' -D ')
|
||||
ret['compiled_with'].append(cw)
|
||||
return ret
|
||||
|
||||
def modules():
|
||||
'''
|
||||
Return list of static and shared modules from apachectl -M
|
||||
|
||||
CLI Example:
|
||||
salt '*' apache.modules
|
||||
'''
|
||||
cmd = __detect_os() + ' -M'
|
||||
ret = {}
|
||||
ret['static'] = []
|
||||
ret['shared'] = []
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
if '(static)' in line:
|
||||
ret['static'].append(comps[0])
|
||||
if '(shared)' in line:
|
||||
ret['shared'].append(comps[0])
|
||||
return ret
|
||||
|
||||
def servermods():
|
||||
'''
|
||||
Return list of modules compiled into the server (apachectl -l)
|
||||
|
||||
CLI Example:
|
||||
salt '*' apache.servermods
|
||||
'''
|
||||
cmd = __detect_os() + ' -l'
|
||||
ret = []
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
for line in out:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
if '.c' in line:
|
||||
ret.append(line.strip())
|
||||
return ret
|
||||
|
||||
def directives():
|
||||
'''
|
||||
Return list of directives together with expected arguments
|
||||
and places where the directive is valid (apachectl -L)
|
||||
|
||||
CLI Example:
|
||||
salt '*' apache.directives
|
||||
'''
|
||||
cmd = __detect_os() + ' -L'
|
||||
ret = {}
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
out = out.replace('\n\t', '\t')
|
||||
for line in out.split('\n'):
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split('\t')
|
||||
desc = '\n'.join(comps[1:])
|
||||
ret[comps[0]] = desc
|
||||
return ret
|
||||
|
||||
def vhosts():
|
||||
'''
|
||||
Show the settings as parsed from the config file (currently
|
||||
only shows the virtualhost settings). (apachectl -S)
|
||||
Because each additional virtual host adds to the execution
|
||||
time, this command may require a long timeout be specified.
|
||||
|
||||
CLI Example:
|
||||
salt -t 10 '*' apache.vhosts
|
||||
'''
|
||||
cmd = __detect_os() + ' -S'
|
||||
ret = {}
|
||||
namevhost = ''
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.split('\n'):
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
if 'is a NameVirtualHost' in line:
|
||||
namevhost = comps[0]
|
||||
ret[namevhost] = {}
|
||||
else:
|
||||
if comps[0] == 'default':
|
||||
ret[namevhost]['default'] = {}
|
||||
ret[namevhost]['default']['vhost'] = comps[2]
|
||||
ret[namevhost]['default']['conf'] = comps[3].replace('(', '').replace(')', '')
|
||||
if comps[0] == 'port':
|
||||
ret[namevhost][comps[3]] = {}
|
||||
ret[namevhost][comps[3]]['vhost'] = comps[3]
|
||||
ret[namevhost][comps[3]]['conf'] = comps[4].replace('(', '').replace(')', '')
|
||||
ret[namevhost][comps[3]]['port'] = comps[1]
|
||||
return ret
|
||||
|
||||
def signal(signal = None):
|
||||
'''
|
||||
Signals httpd to start, restart, or stop.
|
||||
|
||||
CLI Example:
|
||||
salt '*' apache.signal restart
|
||||
'''
|
||||
valid_signals = 'start stop restart graceful graceful-stop'
|
||||
if not valid_signals.count(signal):
|
||||
return
|
||||
cmd = __detect_os() + ' -k %s' % signal
|
||||
out = __salt__['cmd.run'](cmd)
|
219
debian/salt/usr/share/pyshared/salt/modules/apt.py
vendored
219
debian/salt/usr/share/pyshared/salt/modules/apt.py
vendored
|
@ -1,219 +0,0 @@
|
|||
'''
|
||||
Support for apt
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Confirm this module is on a Debian based system
|
||||
'''
|
||||
|
||||
return 'pkg' if __grains__['os'] == 'Debian' else False
|
||||
|
||||
def available_version(name):
|
||||
'''
|
||||
The available version of the package in the repository
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.available_version <package name>
|
||||
'''
|
||||
version = ''
|
||||
cmd = 'apt-cache show ' + name + ' | grep Version'
|
||||
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
|
||||
version_list = out.split()
|
||||
if len(version_list) >= 2:
|
||||
version = version_list[1]
|
||||
|
||||
return version
|
||||
|
||||
def version(name):
|
||||
'''
|
||||
Returns a string representing the package version or an empty string if not
|
||||
installed
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.version <package name>
|
||||
'''
|
||||
pkgs = list_pkgs(name)
|
||||
if pkgs.has_key(name):
|
||||
return pkgs[name]
|
||||
else:
|
||||
return ''
|
||||
|
||||
def refresh_db():
|
||||
'''
|
||||
Updates the apt database to latest packages based upon repositories
|
||||
|
||||
Returns a dict: {'<database name>': Bool}
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.refresh_db
|
||||
'''
|
||||
cmd = 'apt-get update'
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
|
||||
servers = {}
|
||||
for line in out:
|
||||
cols = line.split()
|
||||
if not len(cols):
|
||||
continue
|
||||
ident = " ".join(cols[1:4])
|
||||
if cols[0].count('Get'):
|
||||
servers[ident] = True
|
||||
else:
|
||||
servers[ident] = False
|
||||
|
||||
return servers
|
||||
|
||||
def install(pkg, refresh=False):
|
||||
'''
|
||||
Install the passed package
|
||||
|
||||
Return a dict containing the new package names and versions:
|
||||
{'<package>': {'old': '<old-version>',
|
||||
'new': '<new-version>']}
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.install <package name>
|
||||
'''
|
||||
if(refresh):
|
||||
refresh_db()
|
||||
|
||||
ret_pkgs = {}
|
||||
old_pkgs = list_pkgs()
|
||||
cmd = 'apt-get -y install ' + pkg
|
||||
subprocess.call(cmd, shell=True)
|
||||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
if old_pkgs.has_key(pkg):
|
||||
if old_pkgs[pkg] == new_pkgs[pkg]:
|
||||
continue
|
||||
else:
|
||||
ret_pkgs[pkg] = {'old': old_pkgs[pkg],
|
||||
'new': new_pkgs[pkg]}
|
||||
else:
|
||||
ret_pkgs[pkg] = {'old': '',
|
||||
'new': new_pkgs[pkg]}
|
||||
|
||||
return ret_pkgs
|
||||
|
||||
def remove(pkg):
|
||||
'''
|
||||
Remove a single package via apt-get remove
|
||||
|
||||
Return a list containing the names of the removed packages:
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.remove <package name>
|
||||
'''
|
||||
ret_pkgs = []
|
||||
old_pkgs = list_pkgs()
|
||||
|
||||
cmd = 'apt-get -y remove ' + pkg
|
||||
subprocess.call(cmd, shell=True)
|
||||
new = list_pkgs()
|
||||
|
||||
for pkg in old_pkgs:
|
||||
if not new_pkgs.has_key(pkg):
|
||||
ret_pkgs.append(pkg)
|
||||
|
||||
return ret_pkgs
|
||||
|
||||
def purge(pkg):
|
||||
'''
|
||||
Remove a package via apt-get along with all configuration files and
|
||||
unused dependencies as determined by apt-get autoremove
|
||||
|
||||
Returns a list containing the names of the removed packages
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.purge <package name>
|
||||
'''
|
||||
ret_pkgs = []
|
||||
old_pkgs = list_pkgs()
|
||||
|
||||
# Remove inital package
|
||||
purge_cmd = 'apt-get -y purge ' + pkg
|
||||
subprocess.call(purge_cmd, shell=True)
|
||||
|
||||
# Remove any dependencies that are no longer needed
|
||||
autoremove_cmd = 'apt-get -y autoremove'
|
||||
subprocess.call(purge_cmd, shell=True)
|
||||
|
||||
new = list_pkgs()
|
||||
|
||||
for pkg in old_pkgs:
|
||||
if not new_pkgs.has_key(pkg):
|
||||
ret_pkgs.append(pkg)
|
||||
|
||||
return ret_pkgs
|
||||
|
||||
|
||||
def upgrade(refresh=True):
|
||||
'''
|
||||
Upgrades all packages via apt-get dist-upgrade
|
||||
|
||||
Returns a list of dicts containing the package names, and the new and old versions:
|
||||
[
|
||||
{'<package>': {'old': '<old-version>',
|
||||
'new': '<new-version>']
|
||||
}',
|
||||
...
|
||||
]
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.upgrade
|
||||
'''
|
||||
|
||||
if(update_repos):
|
||||
refresh_db()
|
||||
|
||||
ret_pkgs = {}
|
||||
old_pkgs = list_pkgs()
|
||||
cmd = 'apt-get -y dist-upgrade'
|
||||
subprocess.call(cmd, shell=True)
|
||||
new_pkgs = list_pkgs()
|
||||
|
||||
for pkg in new_pkgs:
|
||||
if old_pkgs.has_key(pkg):
|
||||
if old_pkgs[pkg] == new_pkgs[pkg]:
|
||||
continue
|
||||
else:
|
||||
ret_pkgs[pkg] = {'old': old_pkgs[pkg],
|
||||
'new': new_pkgs[pkg]}
|
||||
else:
|
||||
ret_pkgs[pkg] = {'old': '',
|
||||
'new': new_pkgs[pkg]}
|
||||
|
||||
return ret_pkgs
|
||||
|
||||
|
||||
def list_pkgs(regex_string=""):
|
||||
'''
|
||||
List the packages currently installed in a dict:
|
||||
{'<package_name>': '<version>'}
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.list_pkgs
|
||||
'''
|
||||
ret = {}
|
||||
cmd = 'dpkg --list ' + regex_string
|
||||
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
|
||||
for line in out:
|
||||
cols = line.split()
|
||||
if len(cols) and cols[0].count('ii'):
|
||||
ret[cols[1]] = cols[2]
|
||||
|
||||
return ret
|
|
@ -1,166 +0,0 @@
|
|||
'''
|
||||
Specialized routines used by the butter cloud component
|
||||
'''
|
||||
# Import salt modules
|
||||
import virt
|
||||
|
||||
# Import python modules
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import copy
|
||||
import tempfile
|
||||
|
||||
def _place_image(image, vda):
|
||||
'''
|
||||
Moves the image file from the image pool into the final destination.
|
||||
'''
|
||||
image_d = image + '.d'
|
||||
vda_dir = os.path.dirname(vda)
|
||||
if not os.path.isdir(vda_dir):
|
||||
os.makedirs(vda_dir)
|
||||
if not os.path.isdir(image_d):
|
||||
# No available images in the pool, copying fresh image
|
||||
shutil.copy(image, vda)
|
||||
return
|
||||
images = os.listdir(image_d)
|
||||
if not images:
|
||||
# No available images in the pool, copying fresh image
|
||||
shutil.copy(image, vda)
|
||||
return
|
||||
shutil.move(os.path.join(image_d, images[0]), vda)
|
||||
|
||||
creds = libvirt_creds()
|
||||
if not os.path.isdir(vda_dir):
|
||||
os.makedirs(vda_dir)
|
||||
tdir = copy.deepcopy(vda_dir)
|
||||
while not tdir == '/':
|
||||
os.chmod(tdir, 493)
|
||||
tdir = os.path.dirname(tdir)
|
||||
ch_cmd = 'chown ' + creds['user'] + ':' + creds['group'] + ' '\
|
||||
+ vda
|
||||
subprocess.call(ch_cmd, shell=True)
|
||||
|
||||
def _gen_pin_drives(pins):
|
||||
'''
|
||||
Generate the "pinned" vm image
|
||||
'''
|
||||
creds = libvirt_creds()
|
||||
for pin in pins:
|
||||
dirname = os.path.dirname(pin['path'])
|
||||
if os.path.exists(pin['path']):
|
||||
continue
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname)
|
||||
tdir = copy.deepcopy(dirname)
|
||||
while not tdir == '/':
|
||||
os.chmod(tdir, 493)
|
||||
tdir = os.path.dirname(tdir)
|
||||
|
||||
i_cmd = 'qemu-img create ' + pin['path'] + ' ' + pin['size'] + 'G'
|
||||
f_cmd = 'yes | mkfs.' + pin['filesystem'] + ' ' + pin['path']
|
||||
ch_cmd = 'chown ' + creds['user'] + ':' + creds['group'] + ' '\
|
||||
+ pin['path']
|
||||
subprocess.call(i_cmd, shell=True)
|
||||
subprocess.call(f_cmd, shell=True)
|
||||
if pin['filesystem'].startswith('ext'):
|
||||
t_cmd = 'tune2fs -c 0 -i 0 ' + pin['filesystem']
|
||||
subprocess.call(t_cmd, shell=True)
|
||||
if pin['format'] == 'qcow2':
|
||||
q_cmd = 'qemu-img convert -O qcow2 ' + pin['path'] + ' '\
|
||||
+ pin['path'] + '.tmp'
|
||||
subprocess.call(q_cmd, shell=True)
|
||||
shutil.move(pin['path'] + '.tmp', pin['path'])
|
||||
subprocess.call(ch_cmd, shell=True)
|
||||
return True
|
||||
|
||||
def _apply_overlay(vda, instance):
|
||||
'''
|
||||
Use libguestfs to apply the overlay under the specified instance to the
|
||||
specified vda
|
||||
'''
|
||||
overlay = os.path.join(instance, 'overlay')
|
||||
if not os.path.isdir(overlay):
|
||||
return False
|
||||
tmp = tempfile.mkdtemp()
|
||||
tar = os.path.join(tmp, 'host.tgz')
|
||||
cwd = os.getcwd()
|
||||
os.chdir(overlay)
|
||||
t_cmd = 'tar cvzf ' + tar + ' *'
|
||||
subprocess.call(t_cmd, shell=True)
|
||||
os.chdir(cwd)
|
||||
g_cmd = 'guestfish -i -a ' + vda + ' tgz-in ' + tar + ' /'
|
||||
subprocess.call(g_cmd, shell=True)
|
||||
shutil.rmtree(tmp)
|
||||
return True
|
||||
|
||||
def libvirt_creds():
|
||||
'''
|
||||
Returns the user and group that the disk images should be owned by
|
||||
|
||||
CLI Example:
|
||||
salt '*' butterkvm.libvirt_creds
|
||||
'''
|
||||
g_cmd = 'grep group /etc/libvirt/qemu.conf'
|
||||
u_cmd = 'grep user /etc/libvirt/qemu.conf'
|
||||
group = subprocess.Popen(g_cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('"')[1]
|
||||
user = subprocess.Popen(u_cmd,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('"')[1]
|
||||
return {'user': user, 'group': group}
|
||||
|
||||
def local_images(local_path):
|
||||
'''
|
||||
return the virtual machine names for all of the images located in the
|
||||
butter cloud's local_path in a list:
|
||||
|
||||
['vm1.boo.com', 'vm2.foo.com']
|
||||
|
||||
CLI Example:
|
||||
salt '*' buttervm.local_images <image_path>
|
||||
'''
|
||||
if not os.path.isdir(local_path):
|
||||
return []
|
||||
images = os.listdir(local_path)
|
||||
images.sort()
|
||||
return images
|
||||
|
||||
|
||||
def full_butter_data(local_path):
|
||||
'''
|
||||
Return the full virt info, but add butter data!
|
||||
|
||||
CLI Example:
|
||||
salt '*' buttervm.full_butter_data <image_path>
|
||||
'''
|
||||
info = virt.full_info()
|
||||
info['local_images'] = local_images(local_path)
|
||||
return info
|
||||
|
||||
def create(instance, vda, image, pin):
|
||||
'''
|
||||
Create a virtual machine, this is part of the butter vm system and assumes
|
||||
that the files prepared by butter are available via shared storage.
|
||||
AKA - don't call this from the command line!
|
||||
|
||||
Arguments:
|
||||
instance - string, The path to the instance directory for the given vm on
|
||||
shared storage
|
||||
vda - The location where the virtual machine image needs to be placed
|
||||
image - The image to move into place
|
||||
pin - a "pin" data structure defining the myriad of possible vdb-vbz disk
|
||||
images to generate.
|
||||
|
||||
CLI Example:
|
||||
salt '*' butterkvm.create <instance dir> <root image location>\
|
||||
<Destination> <pin data>
|
||||
'''
|
||||
if not os.path.isfile(vda):
|
||||
# Check that this is a fresh vm image, if so, copy it into place any
|
||||
# apply the overlay, otherwise, just start the vm
|
||||
_place_image(image, vda)
|
||||
_gen_pin_drives(pin)
|
||||
_apply_overlay(vda, instance)
|
||||
virt.create_xml_path(os.path.join(instance, 'config.xml'))
|
|
@ -1,32 +0,0 @@
|
|||
'''
|
||||
The cluster module is used to distribute and activate salt HA cluster
|
||||
components
|
||||
'''
|
||||
# Import Python Modules
|
||||
import os
|
||||
# Import Salt Modules
|
||||
import salt.config
|
||||
|
||||
def distrib(minions,
|
||||
master_conf,
|
||||
master_pem,
|
||||
conf_file):
|
||||
'''
|
||||
Set up this minion as a failover master - only intended for use by the
|
||||
cluster interface
|
||||
'''
|
||||
# Write the master config file
|
||||
open(conf_file, 'w+').write(master_conf)
|
||||
# Get the distributed master config opts
|
||||
opts = salt.config.master_config(conf_file)
|
||||
# Commit the minions
|
||||
minion_dir = os.path.join(opts['pki_dir'], 'minions')
|
||||
if not os.path.isdir(minion_dir):
|
||||
os.makedirs(minion_dir)
|
||||
for minion in minions:
|
||||
open(os.path.join(minion_dir, minion),
|
||||
'w+').write(minions[minion])
|
||||
# Commit the master.pem and verify the cluster interface
|
||||
if master_pem:
|
||||
open(os.path.join(opts['pki_dir'],
|
||||
'master.pem'), 'w+').write(master_pem)
|
145
debian/salt/usr/share/pyshared/salt/modules/cmd.py
vendored
145
debian/salt/usr/share/pyshared/salt/modules/cmd.py
vendored
|
@ -1,145 +0,0 @@
|
|||
'''
|
||||
A module for shelling out
|
||||
|
||||
Keep in mind that this module is insecure, in that it can give whomever has
|
||||
access to the master root execution access to all salt minions
|
||||
'''
|
||||
# Import Python libs
|
||||
import subprocess
|
||||
import tempfile
|
||||
import os
|
||||
import logging
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Set the default working directory to the home directory
|
||||
# of the user salt-minion is running as. Default: /root
|
||||
DEFAULT_CWD = os.path.expanduser('~')
|
||||
|
||||
# Set up the default outputters
|
||||
__outputter__ = {
|
||||
'run': 'txt'
|
||||
}
|
||||
|
||||
def _is_exec(path):
|
||||
'''
|
||||
Return true if the passed path exists and is execuatable
|
||||
'''
|
||||
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
|
||||
|
||||
def run(cmd, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
Execute the passed command and return the output as a string
|
||||
|
||||
CLI Example:
|
||||
salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
out = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT).communicate()[0]
|
||||
log.debug(out)
|
||||
return out
|
||||
|
||||
def run_stdout(cmd, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
Execute a command, and only return the standard out
|
||||
|
||||
CLI Example:
|
||||
salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
stdout = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
log.debug(stdout)
|
||||
return stdout
|
||||
|
||||
def run_stderr(cmd, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
Execute a command and only return the standard error
|
||||
|
||||
CLI Example:
|
||||
salt '*' cmd.run "ls -l | grep foo | awk '{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
stderr = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stderr=subprocess.PIPE).communicate()[0]
|
||||
log.debug(stderr)
|
||||
return stderr
|
||||
|
||||
def run_all(cmd, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
Execute the passed command and return a dict of return data
|
||||
|
||||
CLI Example:
|
||||
salt '*' cmd.run_all "ls -l | grep foo | awk '{print $2}'"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
ret = {}
|
||||
proc = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
out = proc.communicate()
|
||||
ret['stdout'] = out[0]
|
||||
ret['stderr'] = out[1]
|
||||
ret['retcode'] = proc.returncode
|
||||
ret['pid'] = proc.pid
|
||||
if not ret['retcode']:
|
||||
log.error('Command {0} failed'.format(cmd))
|
||||
log.error('stdout: {0}'.format(ret['stdout']))
|
||||
log.error('stderr: {0}'.format(ret['stderr']))
|
||||
else:
|
||||
log.info('stdout: {0}'.format(ret['stdout']))
|
||||
log.info('stderr: {0}'.format(ret['stderr']))
|
||||
return ret
|
||||
|
||||
def retcode(cmd, cwd=DEFAULT_CWD):
|
||||
'''
|
||||
Execute a shell command and return the command's return code.
|
||||
|
||||
CLI Example:
|
||||
salt '*' cmd.retcode "file /bin/bash"
|
||||
'''
|
||||
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
|
||||
return subprocess.call(cmd, shell=True, cwd=cwd)
|
||||
|
||||
def has_exec(cmd):
    '''
    Return True if the named executable is available on the minion,
    False otherwise. Absolute paths are checked directly; bare names are
    searched for on PATH.

    CLI Example:
    salt '*' cmd.has_exec cat
    '''
    if cmd.startswith('/'):
        return _is_exec(cmd)
    search_dirs = os.environ['PATH'].split(os.pathsep)
    return any(_is_exec(os.path.join(d, cmd)) for d in search_dirs)
|
||||
|
||||
def exec_code(lang, code):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. The stdout and stderr will be returned.

    CLI Example:
    salt '*' cmd.exec_code ruby 'puts "cheese"'
    '''
    # mkstemp() returns (fd, path); the original passed the whole tuple
    # to open() (TypeError) and referenced an undefined 'cwd' variable.
    fd, cfn = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as fp_:
            fp_.write(code)
        return subprocess.Popen(lang + ' ' + cfn,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT).communicate()[0]
    finally:
        # The original leaked the temp file on every call
        os.remove(cfn)
|
|
@ -1,73 +0,0 @@
|
|||
'''
|
||||
Minion side functions for salt-cp
|
||||
'''
|
||||
# Import python libs
|
||||
import os
|
||||
import hashlib
|
||||
|
||||
# Import salt libs
|
||||
import salt.minion
|
||||
|
||||
# Import Third Party Libs
|
||||
import zmq
|
||||
|
||||
def recv(files, dest):
    '''
    Used with salt-cp: receive a dict of {path: data} pushed from the
    master and write each file out relative to ``dest``.

    Returns {written_path: True/False} per file, or the string
    'Destination unavailable' when no usable target can be determined.
    '''
    ret = {}
    for path, data in files.items():
        if os.path.basename(path) == os.path.basename(dest) \
                and not os.path.isdir(dest):
            final = dest
        elif os.path.isdir(dest):
            final = os.path.join(dest, os.path.basename(path))
        elif os.path.isdir(os.path.dirname(dest)):
            final = dest
        else:
            return 'Destination unavailable'

        try:
            open(final, 'w+').write(data)
            ret[final] = True
        except IOError:
            ret[final] = False

    return ret
|
||||
|
||||
def get_file(path, dest, env='base'):
    '''
    Fetch a single file from the salt master and write it to ``dest``.
    '''
    return salt.minion.FileClient(__opts__).get_file(path, dest, False, env)
|
||||
|
||||
def cache_files(paths, env='base'):
    '''
    Gather many files from the master; the gathered files are saved in
    the minion cachedir reflective of the paths retrieved from the
    master.
    '''
    return salt.minion.FileClient(__opts__).cache_files(paths, env)
|
||||
|
||||
def cache_file(path, env='base'):
    '''
    Cache a single file from the file server in the minion file cache.
    '''
    return salt.minion.FileClient(__opts__).cache_file(path, env)
|
||||
|
||||
def hash_file(path, env='base'):
    '''
    Return the hash of a file. Prepend the path with salt://<file> for a
    file on the master file server, or with / for a local file.

    CLI Example:
    salt '*' cp.hash_file salt://path/to/file
    '''
    return salt.minion.FileClient(__opts__).hash_file(path, env)
|
|
@ -1,77 +0,0 @@
|
|||
'''
|
||||
Work with cron
|
||||
'''
|
||||
import os
import tempfile
|
||||
|
||||
def __append_special(user, special, cmd):
    '''
    Append a "special" (@reboot-style) entry to the user's crontab and
    load the result with crontab(1).
    '''
    # mkstemp() returns (fd, path); the original passed the tuple
    # straight to open(), which raised TypeError. The file also has to
    # be closed (flushed) before crontab reads it.
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w+') as tmpd:
        tmpd.write(raw_cron(user))
        tmpd.write(_render_special(special, cmd))
    cmd = 'crontab {0} -u {1}'.format(path, user)
    return __salt__['cmd.run'](cmd)
|
||||
|
||||
def _render_special(special, cmd):
|
||||
'''
|
||||
Take a special string and a command string and render it
|
||||
'''
|
||||
return '{0} {1}'.format(special, cmd)
|
||||
|
||||
def raw_cron(user):
    '''
    Return the raw contents of the named user's crontab.
    '''
    return __salt__['cmd.run_stdout']('crontab -l -u {0}'.format(user))
|
||||
|
||||
def list_tab(user):
    '''
    Return the specified user's crontab parsed into a dict with two
    keys: 'crons' (standard five-field entries) and 'special'
    (@reboot-style entries).

    CLI Example:
    salt '*' cron.list_tab root
    '''
    ret = {'crons': [], 'special': []}
    for line in raw_cron(user).split('\n'):
        comps = line.split()
        if line.startswith('@'):
            # "special" entry, e.g. "@hourly /usr/bin/foo"
            if len(comps) < 2:
                # Invalid line
                continue
            ret['special'].append({'spec': comps[0],
                                   'cmd': ' '.join(comps[1:])})
        if len(comps) > 5:
            # Appears to be a standard cron line
            dat = dict(zip(('min', 'hour', 'daymonth', 'month', 'dayweek'),
                           comps[:5]))
            dat['cmd'] = ' '.join(comps[5:])
            ret['crons'].append(dat)
    return ret
|
||||
|
||||
def set_special(user, special, cmd):
    '''
    Set up a special command in the crontab.

    CLI Example:
    salt '*' cron.set_special @hourly 'echo foobar'
    '''
    # If an identical special entry already exists there is nothing to do
    for dat in list_tab(user)['special']:
        if dat['spec'] == special and dat['cmd'] == cmd:
            return True
    return __append_special(user, special, cmd)
|
||||
|
|
@ -1,64 +0,0 @@
|
|||
'''
|
||||
Module for running arbitrary tests
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
def echo(text):
    '''
    Return the passed string unchanged - used for testing the connection.

    CLI Example:
    salt '*' test.echo 'foo bar baz quo qux'
    '''
    # Python 2 print statement; this module is compiled with Cython
    print 'Echo got called!'
    return text
|
||||
|
||||
def ping():
    '''
    Used to make sure the minion is up and responding; always returns
    True.

    CLI Example:
    salt '*' test.ping
    '''
    return True
|
||||
|
||||
def fib(long num):
    '''
    Return a Fibonacci sequence up to the passed number, and the time it took
    to compute in seconds. Used for performance tests

    CLI Example:
    salt '*' test.fib 3
    '''
    # Cython typed locals keep the loop running at C speed
    cdef float start = time.time()
    cdef long a = 0
    cdef long b = 1
    ret = [0]
    # Collect every Fibonacci value strictly below num
    while b < num:
        ret.append(b)
        a, b = b, a + b
    cdef float end = time.time() - start
    return ret, end
|
||||
|
||||
def collatz(long start):
    '''
    Execute the collatz conjecture from the passed starting number, returns
    the sequence and the time it took to compute. Used for performance tests.

    CLI Example:
    salt '*' test.collatz 3
    '''
    cdef float begin = time.time()
    steps = []
    # NOTE(review): loops forever if start < 1 — presumably callers
    # always pass a positive integer; confirm before hardening.
    while start != 1:
        steps.append(start)
        if start > 1:
            if start % 2 == 0:
                start = start / 2
            else:
                start = start * 3 + 1
    cdef float end = time.time() - begin
    return steps, end
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
'''
|
||||
Module for gathering disk information
|
||||
'''
|
||||
import subprocess
|
||||
|
||||
def usage():
    '''
    Return usage information for volumes mounted on this minion, keyed
    by filesystem device, as parsed from ``df -P``.

    CLI Example:
    salt '*' disk.usage
    '''
    out = subprocess.Popen('df -P',
            shell=True,
            stdout=subprocess.PIPE).communicate()[0].split('\n')
    ret = {}
    for line in out:
        if not line.count(' '):
            continue
        if line.startswith('Filesystem'):
            # Header row
            continue
        comps = line.split()
        ret[comps[0]] = {
            '1K-blocks': comps[1],
            'used': comps[2],
            'available': comps[3],
            'capacity': comps[4],
            'mountpoint': comps[5],
        }
    return ret
|
295
debian/salt/usr/share/pyshared/salt/modules/file.py
vendored
295
debian/salt/usr/share/pyshared/salt/modules/file.py
vendored
|
@ -1,295 +0,0 @@
|
|||
'''
|
||||
Manage information about files on the minion, set/read user, group, and mode
|
||||
data
|
||||
'''
|
||||
|
||||
# TODO
|
||||
# We should add the capability to do u+r type operations here some time in the
|
||||
# future
|
||||
|
||||
import os
|
||||
import grp
|
||||
import pwd
|
||||
import hashlib
|
||||
|
||||
import salt.utils.find
|
||||
|
||||
def gid_to_group(gid):
    '''
    Convert a group id to the matching group name on this system;
    returns '' for an unknown gid.

    CLI Example:
    salt '*' file.gid_to_group 0
    '''
    try:
        grinfo = grp.getgrgid(gid)
    except KeyError:
        return ''
    return grinfo.gr_name
|
||||
|
||||
def group_to_gid(group):
    '''
    Convert a group name to its gid on this system; returns '' for an
    unknown group.

    CLI Example:
    salt '*' file.group_to_gid root
    '''
    try:
        grinfo = grp.getgrnam(group)
    except KeyError:
        return ''
    return grinfo.gr_gid
|
||||
|
||||
def get_gid(path):
    '''
    Return the gid of the group that owns a given file, or -1 if the
    path is not a regular file.

    CLI Example:
    salt '*' file.get_gid /etc/passwd
    '''
    if os.path.isfile(path):
        return os.stat(path).st_gid
    return -1
|
||||
|
||||
def get_group(path):
    '''
    Return the name of the group that owns a given file, or False if the
    path is not a regular file.

    CLI Example:
    salt '*' file.get_group /etc/passwd
    '''
    gid = get_gid(path)
    return False if gid == -1 else gid_to_group(gid)
|
||||
|
||||
def uid_to_user(uid):
    '''
    Convert a uid to the matching user name; returns '' for an unknown
    uid.

    CLI Example:
    salt '*' file.uid_to_user 0
    '''
    try:
        pwinfo = pwd.getpwuid(uid)
    except KeyError:
        return ''
    return pwinfo.pw_name
|
||||
|
||||
def user_to_uid(user):
    '''
    Convert a user name to its uid; returns '' for an unknown user.

    CLI Example:
    salt '*' file.user_to_uid root
    '''
    try:
        pwinfo = pwd.getpwnam(user)
    except KeyError:
        return ''
    return pwinfo.pw_uid
|
||||
|
||||
def get_uid(path):
    '''
    Return the uid of the user that owns a given file, or -1 if the path
    is not a regular file.

    CLI Example:
    salt '*' file.get_uid /etc/passwd
    '''
    if not os.path.isfile(path):
        # Return -1 for consistency with get_gid; the original returned
        # False here, which made get_user's `uid == -1` check miss and
        # resolve a missing file to uid 0 ('root').
        return -1
    return os.stat(path).st_uid
|
||||
|
||||
def get_user(path):
    '''
    Return the name of the user that owns a given file, or False if the
    path is not a regular file.

    CLI Example:
    salt '*' file.get_user /etc/passwd
    '''
    uid = get_uid(path)
    # get_uid historically returned False (not -1) for a missing file;
    # the original `uid == -1` test let False fall through to
    # uid_to_user(False) == getpwuid(0) == 'root'. Accept both sentinels.
    if uid is False or uid == -1:
        return False
    return uid_to_user(uid)
|
||||
|
||||
def get_mode(path):
    '''
    Return the mode of a file as an octal string with no leading zero,
    or -1 if the path is not a regular file.

    CLI Example:
    salt '*' file.get_mode /etc/passwd
    '''
    if not os.path.isfile(path):
        return -1
    mode = str(oct(os.stat(path).st_mode)[-4:])
    return mode[1:] if mode.startswith('0') else mode
|
||||
|
||||
def set_mode(path, mode):
    '''
    Set the mode of a file. ``mode`` is an octal string such as '0644'.

    Returns the resulting mode, or an error string.

    CLI Example:
    salt '*' file.set_mode /etc/passwd 0644
    '''
    mode = str(mode)
    if not os.path.isfile(path):
        return 'File not found'
    try:
        os.chmod(path, int(mode, 8))
    except (ValueError, OSError):
        # ValueError: mode is not a valid octal string
        # OSError: chmod itself failed; the original bare except also
        # swallowed KeyboardInterrupt/SystemExit
        return 'Invalid Mode ' + mode
    return get_mode(path)
|
||||
|
||||
def chown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group.

    Returns an error string if the user, group or file is missing.

    CLI Example:
    salt '*' file.chown /etc/passwd root root
    '''
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    if uid == '':
        err += 'User does not exist\n'
    if gid == '':
        err += 'Group does not exist\n'
    if not os.path.isfile(path):
        err += 'File not found'
    return err if err else os.chown(path, uid, gid)
|
||||
|
||||
def chgrp(path, group):
    '''
    Change the group of a file, keeping its current owner.

    Returns an error string if the group or file is missing.

    CLI Example:
    salt '*' file.chgrp /etc/passwd root
    '''
    gid = group_to_gid(group)
    err = ''
    if gid == '':
        err += 'Group does not exist\n'
    if not os.path.isfile(path):
        err += 'File not found'
    if err:
        return err
    # Delegate to chown with the existing owner unchanged
    return chown(path, get_user(path), group)
|
||||
|
||||
def get_sum(path, form='md5'):
    '''
    Return the checksum for the given file; md5 (the default), sha1,
    sha224, sha256, sha384 and sha512 are supported.

    CLI Example:
    salt '*' file.get_sum /etc/passwd sha512
    '''
    if not os.path.isfile(path):
        return 'File not found'
    try:
        return getattr(hashlib, form)(open(path, 'rb').read()).hexdigest()
    except (IOError, OSError) as e:
        # Readable path vanished or read failed
        return 'File Error: %s' % (str(e))
    except AttributeError:
        # hashlib has no constructor named `form`
        return 'Hash ' + form + ' not supported'
    except NameError:
        return 'Hashlib unavailable - please fix your python install'
    except Exception as e:
        return str(e)
|
||||
|
||||
def find(path, *opts):
    '''
    Approximate the Unix find(1) command and return a sorted list of
    paths that meet the specified criteria.

    Each extra argument is a ``key=value`` option. Match criteria:
        name / iname  = path glob (case sensitive / insensitive)
        regex / iregex = path regex (case sensitive / insensitive)
        type  = file types, any of: a b c d p f l s
        user  = user names and/or uids
        group = group names and/or gids
        size  = [+-]number[b|k|m|g|t]   (default unit: byte)
        mtime = interval: [<num>w] [<num>[d]] [<num>h] [<num>m] [<num>s]
        grep  = regex searched in file contents
    Actions:
        delete [= file-types]          (default type: 'f')
        exec = command [arg ...]       ({} replaced by pathname)
        print [= group,md5,mode,mtime,name,path,size,type,user]
    The default action is 'print=path'.

    CLI Examples:
    salt '*' file.find / type=f name=\*.bak size=+10m
    salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
    salt '*' file.find /var/log name=\*.[0-9] mtime=+30d size=+10m delete
    '''
    criteria = {}
    for opt in opts:
        key, value = opt.split('=', 1)
        criteria[key] = value
    try:
        finder = salt.utils.find.Finder(criteria)
    except ValueError as ex:
        return 'error: {0}'.format(ex)

    return sorted(finder.find(path))
|
|
@ -1,25 +0,0 @@
|
|||
'''
|
||||
Control aspects of the grains data
|
||||
'''
|
||||
# Seed the grains dict so cython will build
|
||||
__grains__ = {}
|
||||
|
||||
def items():
    '''
    Return the full grains data dict.

    CLI Example:
    salt '*' grains.items
    '''
    # Grains are seeded into the module-level __grains__ dict by the loader
    return __grains__
|
||||
|
||||
def item(key):
    '''
    Return a single component of the grains data, or '' if the key is
    not set.

    CLI Example:
    salt '*' grains.item os
    '''
    # dict.has_key() is deprecated (and removed in Python 3); .get with
    # a default preserves the '' fallback in a single lookup.
    return __grains__.get(key, '')
|
|
@ -1,81 +0,0 @@
|
|||
'''
|
||||
Manage groups on Linux
|
||||
'''
|
||||
# Import python libs
|
||||
import grp
|
||||
|
||||
def __virtual__():
    '''
    Load this as the group module only when the kernel is Linux.
    '''
    # Loader hook: the module is published under the name 'group'
    return 'group' if __grains__['kernel'] == 'Linux' else False
|
||||
|
||||
def add(name, gid=None):
    '''
    Add the specified group, optionally with an explicit gid.

    Returns True when groupadd exits 0.

    CLI Example:
    salt '*' group.add foo 3456
    '''
    cmd = 'groupadd '
    if gid:
        cmd += '-g {0} '.format(gid)
    result = __salt__['cmd.run_all'](cmd + name)
    return result['retcode'] == 0
|
||||
|
||||
def delete(name):
    '''
    Remove the named group.

    Returns True when groupdel exits 0.

    CLI Example:
    salt '*' group.delete foo
    '''
    result = __salt__['cmd.run_all']('groupdel {0}'.format(name))
    return result['retcode'] == 0
|
||||
|
||||
def info(name):
    '''
    Return information about a group.

    CLI Example:
    salt '*' group.info foo
    '''
    grinfo = grp.getgrnam(name)
    return dict(name=grinfo.gr_name,
                passwd=grinfo.gr_passwd,
                gid=grinfo.gr_gid,
                members=grinfo.gr_mem)
|
||||
|
||||
def getent():
    '''
    Return info on all groups.

    CLI Example:
    salt '*' group.getent
    '''
    return [info(grinfo.gr_name) for grinfo in grp.getgrall()]
|
||||
|
||||
def chgid(name, gid):
    '''
    Change the gid of a named group.

    Returns True when the gid is already correct or was changed
    successfully, False otherwise.

    (The original docstring was copy-pasted from user.chshell and
    described changing a user's shell.)

    CLI Example:
    salt '*' group.chgid foo 4376
    '''
    pre_gid = __salt__['file.group_to_gid'](name)
    if gid == pre_gid:
        return True
    cmd = 'groupmod -g {0} {1}'.format(gid, name)
    __salt__['cmd.run'](cmd)
    post_gid = __salt__['file.group_to_gid'](name)
    if post_gid != pre_gid:
        return post_gid == gid
    return False
|
163
debian/salt/usr/share/pyshared/salt/modules/hosts.py
vendored
163
debian/salt/usr/share/pyshared/salt/modules/hosts.py
vendored
|
@ -1,163 +0,0 @@
|
|||
'''
|
||||
Manage the information in the hosts file
|
||||
'''
|
||||
import os
|
||||
|
||||
def list_hosts():
    '''
    Return the hosts found in the hosts file in this format:

    {'<ip addr>': ['alias1', 'alias2', ...]}

    CLI Example:
    salt '*' hosts.list_hosts
    '''
    hfn = '/etc/hosts'
    ret = {}
    if not os.path.isfile(hfn):
        return ret
    for line in open(hfn).readlines():
        line = line.strip()
        if not line or line.startswith('#'):
            # Blank or commented line
            continue
        comps = line.split()
        ret[comps[0]] = comps[1:]
    return ret
|
||||
|
||||
def get_ip(host):
    '''
    Return the ip associated with the named host, or '' if the host is
    not present in the hosts file.

    CLI Example:
    salt '*' hosts.get_ip <hostname>
    '''
    hosts = list_hosts()
    if not hosts:
        return ''
    for addr, aliases in hosts.items():
        if host in aliases:
            return addr
    return ''
|
||||
|
||||
def get_alias(ip):
    '''
    Return the list of aliases associated with an ip, or [] when the ip
    is not present.

    CLI Example:
    salt '*' hosts.get_alias <ip addr>
    '''
    # dict.has_key() is deprecated (removed in Python 3); .get()
    # preserves the [] default in one lookup.
    return list_hosts().get(ip, [])
|
||||
|
||||
def has_pair(ip, alias):
    '''
    Return True if the given alias is set for the given ip.

    CLI Example:
    salt '*' hosts.has_pair <ip> <alias>
    '''
    hosts = list_hosts()
    # has_key() is deprecated (removed in Python 3); 'in' works on both
    if ip not in hosts:
        return False
    return alias in hosts[ip]
|
||||
|
||||
def set_host(ip, alias):
    '''
    Set the host entry in the hosts file for the given ip; any previous
    entry for that ip is overwritten.

    CLI Example:
    salt '*' hosts.set_host <ip> <alias>
    '''
    hfn = '/etc/hosts'
    if not os.path.isfile(hfn):
        return False
    lines = open(hfn).readlines()
    ovr = False
    for ind in range(len(lines)):
        tmpline = lines[ind].strip()
        if not tmpline or tmpline.startswith('#'):
            # Blank or commented line
            continue
        if tmpline.split()[0] == ip:
            lines[ind] = ip + '\t\t' + alias + '\n'
            ovr = True
    if not ovr:
        lines.append(ip + '\t\t' + alias + '\n')
    open(hfn, 'w+').writelines(lines)
    return True
|
||||
|
||||
def rm_host(ip, alias):
    '''
    Remove a host entry from the hosts file.

    CLI Example:
    salt '*' hosts.rm_host <ip> <alias>
    '''
    if not has_pair(ip, alias):
        return True
    hfn = '/etc/hosts'
    lines = open(hfn).readlines()
    for ind in range(len(lines)):
        tmpline = lines[ind].strip()
        if not tmpline or tmpline.startswith('#'):
            continue
        comps = tmpline.split()
        if comps[0] != ip:
            continue
        # Rebuild the line without the target alias
        newline = comps[0] + '\t'
        for existing in comps[1:]:
            if existing == alias:
                continue
            newline += '\t' + existing
        # Drop the line entirely when no aliases remain
        lines[ind] = '' if newline.strip() == ip else newline
    open(hfn, 'w+').writelines(lines)
    return True
|
||||
|
||||
def add_host(ip, alias):
    '''
    Add an alias to an existing hosts entry for the given ip; create the
    entry if it is not present.

    CLI Example:
    salt '*' hosts.add_host <ip> <alias>
    '''
    hfn = '/etc/hosts'
    ovr = False
    if not os.path.isfile(hfn):
        return False
    lines = open(hfn).readlines()
    for ind in range(len(lines)):
        tmpline = lines[ind].strip()
        if not tmpline:
            continue
        if tmpline.startswith('#'):
            continue
        comps = tmpline.split()
        if comps[0] == ip:
            newline = comps[0] + '\t'
            for existing in comps[1:]:
                newline += '\t' + existing
            newline += '\t' + alias
            # Replace the matched line in place; the original appended
            # the rebuilt line while keeping the old one (leaving a
            # duplicate entry) and dropped the trailing newline.
            lines[ind] = newline + '\n'
            ovr = True
    if not ovr:
        line = ip + '\t\t' + alias + '\n'
        lines.append(line)
    open(hfn, 'w+').writelines(lines)
    return True
|
|
@ -1,73 +0,0 @@
|
|||
'''
|
||||
Salt module to manage RAID arrays with mdadm
|
||||
'''
|
||||
# Import python libs
|
||||
import os
|
||||
import logging
|
||||
|
||||
# Set up logger
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def __virtual__():
    '''
    mdadm provides raid functions for Linux
    '''
    # Loader hook: publish this module as 'raid' only on Linux minions
    return 'raid' if __grains__['kernel'] == 'Linux' else False
|
||||
|
||||
def list():
    '''
    List the RAID devices parsed from ``mdadm --detail --scan``.

    CLI Example:
    salt '*' mdadm.list
    '''
    ret = {}
    scan = __salt__['cmd.run_stdout']('mdadm --detail --scan')
    for line in scan.split('\n'):
        if not line.count(' '):
            continue
        comps = line.split()
        # Fields arrive as key=value tokens: metadata= name= UUID=
        ret[comps[1]] = {
            'device': comps[1],
            'metadata': comps[2].split('=')[1],
            'name': comps[3].split('=')[1],
            'uuid': comps[4].split('=')[1],
        }
    return ret
|
||||
|
||||
def detail(device='/dev/md0'):
    '''
    Show detail for a specified RAID device

    CLI Example:
    salt '*' mdadm.detail '/dev/md0'
    '''
    ret = {}
    ret['members'] = {}
    cmd = 'mdadm --detail %s' % device
    for line in __salt__['cmd.run_stdout'](cmd).split('\n'):
        if line.startswith(device):
            # Skip the leading "/dev/md0:" banner line
            continue
        if not line.count(' '):
            continue
        if not ':' in line:
            if '/dev/' in line:
                # Component-device table row; columns after the fixed
                # number/major/minor/raiddevice fields up to the final
                # device path form the state string
                comps = line.split()
                state = comps[4:-1]
                ret['members'][comps[0]] = {
                    'number': comps[0],
                    'major': comps[1],
                    'minor': comps[2],
                    'raiddevice': comps[3],
                    'state': ' '.join(state),
                    'device': comps[-1],
                }
            continue
        # "Key : value" header line; normalize the key to snake_case
        comps = line.split(' : ')
        comps[0] = comps[0].lower()
        comps[0] = comps[0].strip()
        comps[0] = comps[0].replace(' ', '_')
        ret[comps[0]] = comps[1].strip()
    return ret
|
|
@ -1,134 +0,0 @@
|
|||
'''
|
||||
Module for gathering and managing information about MooseFS
|
||||
'''
|
||||
|
||||
def dirinfo(path, opts=None):
    '''
    Return information on a directory located on the Moose, parsed from
    ``mfsdirinfo``.

    CLI Example:
    salt '*' moosefs.dirinfo /path/to/dir/ [-[n][h|H]]
    '''
    cmd = 'mfsdirinfo'
    if opts:
        cmd += ' -' + opts
    cmd += ' ' + path
    out = __salt__['cmd.run_all'](cmd)

    ret = {}
    for line in out['stdout'].split('\n'):
        if not line.count(' '):
            continue
        comps = line.split(':')
        ret[comps[0].strip()] = comps[1].strip()
    return ret
|
||||
|
||||
def fileinfo(path):
    '''
    Return information on a file located on the Moose

    CLI Example:
    salt '*' moosefs.fileinfo /path/to/dir/
    '''
    cmd = 'mfsfileinfo ' + path
    ret = {}
    # Tracks the most recently seen chunk header so following "copy"
    # lines attach to the right chunk
    chunknum = ''
    out = __salt__['cmd.run_all'](cmd)

    output = out['stdout'].split('\n')
    for line in output:
        if not line.count(' '):
            continue
        if '/' in line:
            # Chunk header line; split into the "chunk N: ..." part and
            # the "(id:X ver:Y)" metadata part
            comps = line.split('/')

            chunknum = comps[0].strip().split(':')
            meta = comps[1].strip().split(' ')

            chunk = chunknum[0].replace('chunk ', '')
            loc = chunknum[1].strip()
            id_ = meta[0].replace('(id:', '')
            ver = meta[1].replace(')', '').replace('ver:', '')

            ret[chunknum[0]] = {
                'chunk': chunk,
                'loc': loc,
                'id': id_,
                'ver': ver,
            }
        if 'copy' in line:
            # "copy N: <ip>:<port>" line for the current chunk
            copyinfo = line.strip().split(':')
            ret[chunknum[0]][copyinfo[0]] = {
                'copy': copyinfo[0].replace('copy ', ''),
                'ip': copyinfo[1].strip(),
                'port': copyinfo[2],
            }
    return ret
|
||||
|
||||
def mounts():
    '''
    Return the current MooseFS mounts found in the output of ``mount``,
    keyed by local mount point.

    CLI Example:
    salt '*' moosefs.mounts
    '''
    out = __salt__['cmd.run_all']('mount')
    ret = {}
    for line in out['stdout'].split('\n'):
        if not line.count(' '):
            continue
        if 'fuse.mfs' not in line:
            continue
        comps = line.split(' ')
        # comps[0] is "<master>:<port>/<subfolder>"
        info1 = comps[0].split(':')
        info2 = info1[1].split('/')
        ret[comps[2]] = {
            'remote': {
                'master': info1[0],
                'port': info2[0],
                'subfolder': '/' + info2[1],
            },
            'local': comps[2],
            'options': comps[5].replace('(', '').replace(')', '').split(','),
        }
    return ret
||||
|
||||
def getgoal(path, opts=None):
    '''
    Return goal(s) for a file or directory as reported by ``mfsgetgoal``.

    CLI Example:
    salt '*' moosefs.getgoal /path/to/file [-[n][h|H]]
    salt '*' moosefs.getgoal /path/to/dir/ [-[n][h|H][r]]
    '''
    cmd = 'mfsgetgoal'
    ret = {}
    if opts:
        cmd += ' -' + opts
    else:
        opts = ''
    cmd += ' ' + path
    out = __salt__['cmd.run_all'](cmd)

    output = out['stdout'].split('\n')
    if 'r' not in opts:
        # Single-target output: "path: goal"
        goal = output[0].split(': ')
        ret = {
            'goal': goal[1],
        }
    else:
        # Recursive output: one summary line per goal value
        for line in output:
            if not line.count(' '):
                continue
            if path in line:
                continue
            comps = line.split()
            keytext = comps[0] + ' with goal'
            # setdefault replaces the deprecated dict.has_key() check
            ret.setdefault(keytext, {})[comps[3]] = comps[5]
    return ret
|
||||
|
226
debian/salt/usr/share/pyshared/salt/modules/mount.py
vendored
226
debian/salt/usr/share/pyshared/salt/modules/mount.py
vendored
|
@ -1,226 +0,0 @@
|
|||
'''
|
||||
Salt module to manage unix mounts and the fstab file
|
||||
'''
|
||||
# Import python libs
|
||||
import os
|
||||
import stat
|
||||
import logging
|
||||
|
||||
# Set up logger
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def active():
    '''
    List the active mounts, keyed by mount point, as parsed from the
    output of ``mount``.

    CLI Example:
    salt '*' mount.active
    '''
    ret = {}
    for line in __salt__['cmd.run_stdout']('mount').split('\n'):
        comps = line.split()
        if len(comps) != 6:
            # Invalid entry
            continue
        # "<device> on <mnt> type <fstype> (<opts>)"
        ret[comps[2]] = {'device': comps[0],
                         'fstype': comps[4],
                         'opts': comps[5][1:-1].split(',')}
    return ret
|
||||
|
||||
def fstab(config='/etc/fstab'):
    '''
    Parse the fstab and return its entries as a dict keyed by mount
    point, each value holding device, fstype, opts (list), dump and
    pass fields.

    CLI Example:
    salt '*' mount.fstab
    '''
    ret = {}
    if not os.path.isfile(config):
        return ret
    for line in open(config).readlines():
        if line.startswith('#'):
            # Commented
            continue
        if not line.strip():
            # Blank line
            continue
        comps = line.split()
        if len(comps) != 6:
            # Invalid entry
            continue
        device, mnt, fstype, opts, dump, pass_ = comps
        ret[mnt] = {'device': device,
                    'fstype': fstype,
                    'opts': opts.split(','),
                    'dump': dump,
                    'pass': pass_}
    return ret
|
||||
|
||||
def rm_fstab(name, config='/etc/fstab'):
    '''
    Remove the named mount point from the fstab.

    Returns True whether or not an entry was actually removed.

    CLI Example:
    salt '*' mount.rm_fstab /mnt/foo
    '''
    contents = fstab(config)
    # has_key() is deprecated (removed in Python 3); 'in' works on both
    if name not in contents:
        return True
    # The entry is present, rewrite the file without it
    lines = []
    for line in open(config).readlines():
        if line.startswith('#'):
            # Commented
            lines.append(line)
            continue
        if not line.strip():
            # Blank line
            lines.append(line)
            continue
        comps = line.split()
        if not len(comps) == 6:
            # Invalid entry
            lines.append(line)
            continue
        if comps[1] == name:
            # Target entry: drop it (the original also re-split the
            # line here redundantly)
            continue
        lines.append(line)
    open(config, 'w+').writelines(lines)
    return True
|
||||
|
||||
def set_fstab(
        name,
        device,
        fstype,
        opts='defaults',
        dump=0,
        pass_num=0,
        config='/etc/fstab',
        ):
    '''
    Verify that this mount is represented in the fstab: change the entry
    to match the data passed, or add it if it is not present.

    Returns one of:
        'present'    - a matching entry already exists
        'new'        - the entry was appended
        'change'     - an existing entry was updated
        'bad config' - the fstab file does not exist

    CLI Example:
    salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
    '''
    # Normalize a list of options into the comma-joined fstab form;
    # isinstance replaces the `type(opts) == type(list())` anti-pattern
    if isinstance(opts, list):
        opts = ','.join(opts)
    lines = []
    change = False
    present = False
    if not os.path.isfile(config):
        return 'bad config'
    for line in open(config).readlines():
        if line.startswith('#'):
            # Commented
            lines.append(line)
            continue
        if not line.strip():
            # Blank line
            lines.append(line)
            continue
        comps = line.split()
        if len(comps) != 6:
            # Invalid entry
            lines.append(line)
            continue
        if comps[1] == name:
            # Target mount point: sync each field and note whether
            # anything actually changed
            present = True
            if comps[0] != device:
                change = True
                comps[0] = device
            if comps[2] != fstype:
                change = True
                comps[2] = fstype
            if comps[3] != opts:
                change = True
                comps[3] = opts
            if comps[4] != str(dump):
                change = True
                comps[4] = str(dump)
            if comps[5] != str(pass_num):
                change = True
                comps[5] = str(pass_num)
            if change:
                log.debug('fstab entry for mount point {0} is being updated'.format(name))
                newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
                        device,
                        name,
                        fstype,
                        opts,
                        dump,
                        pass_num)
                lines.append(newline)
            else:
                lines.append(line)
        else:
            # Unrelated entry, keep as-is
            lines.append(line)
    if change:
        # An existing entry was modified, commit the rewrite
        open(config, 'w+').writelines(lines)
        return 'change'
    if not change and not present:
        # The entry is new, add it to the end of the fstab
        newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
                device,
                name,
                fstype,
                opts,
                dump,
                pass_num)
        lines.append(newline)
        open(config, 'w+').writelines(lines)
    if present and not change:
        # The right entry is already here
        return 'present'
    return 'new'
|
||||
|
||||
def mount(name, device, mkmnt=False, fstype='', opts='defaults'):
    '''
    Mount a device.

    name   -- mount point directory
    device -- block device path to mount
    mkmnt  -- create the mount point if it does not exist
    fstype -- optional filesystem type, passed as ``mount -t``
    opts   -- mount options, a comma separated string or a list

    Returns True on success, False when the device is missing or is not
    a block device, and the mount command's stderr on failure.

    CLI Example:
    salt '*' mount.mount /mnt/foo /dev/sdz1 True
    '''
    if isinstance(opts, str):
        opts = opts.split(',')
    if not os.path.exists(device):
        return False
    if not stat.S_ISBLK(os.stat(device).st_mode):
        return False
    if not os.path.exists(name) and mkmnt:
        os.makedirs(name)
    lopts = ','.join(opts)
    cmd = 'mount -o {0} {1} {2} '.format(lopts, device, name)
    if fstype:
        cmd += ' -t {0}'.format(fstype)
    out = __salt__['cmd.run_all'](cmd)
    # BUG FIX: a non-zero retcode means the mount failed; the original
    # test was inverted and returned stderr on success / True on failure.
    if out['retcode']:
        return out['stderr']
    return True
|
||||
|
||||
def remount(name, device, mkmnt=False, fstype='', opts='defaults'):
    '''
    Attempt to remount a device; if the device is not already mounted
    fall back to mount().

    Returns True on success, the mount command's stderr on failure.

    CLI Example:
    salt '*' mount.remount /mnt/foo /dev/sdz1 True
    '''
    if isinstance(opts, str):
        opts = opts.split(',')
    mnts = active()
    if name in mnts:  # has_key() is deprecated
        # Already mounted: remount in place with the requested options
        opts.append('remount')
        lopts = ','.join(opts)
        cmd = 'mount -o {0} {1} {2} '.format(lopts, device, name)
        if fstype:
            cmd += ' -t {0}'.format(fstype)
        out = __salt__['cmd.run_all'](cmd)
        if out['retcode']:
            # BUG FIX: was `opt['stderr']`, a NameError on every failure
            return out['stderr']
        return True
    return mount(name, device, mkmnt, fstype, opts)
|
|
@ -1,71 +0,0 @@
|
|||
'''
|
||||
Module to provide MySQL compatibility to salt.
|
||||
|
||||
In order to connect to MySQL, certain configuration is required
|
||||
in /etc/salt/minion on the relevant minions. Some sample configs
|
||||
might look like:
|
||||
|
||||
mysql.host: 'localhost'
|
||||
mysql.port: 3306
|
||||
mysql.user: 'root'
|
||||
mysql.pass: ''
|
||||
mysql.db: 'mysql'
|
||||
'''
|
||||
|
||||
import MySQLdb
|
||||
|
||||
__opts__ = {}
|
||||
|
||||
def connect():
    '''
    Open a MySQL connection with the credentials configured in the
    minion config (mysql.host, mysql.user, mysql.pass, mysql.db,
    mysql.port) and return it with autocommit enabled.
    '''
    conn = MySQLdb.connect(
        __opts__['mysql.host'],
        __opts__['mysql.user'],
        __opts__['mysql.pass'],
        __opts__['mysql.db'],
        __opts__['mysql.port'],
    )
    conn.autocommit(True)
    return conn
|
||||
|
||||
def status():
    '''
    Return the server status from a SHOW STATUS query as a dict of
    variable name -> value.

    CLI Example:
    salt '*' mysql.status
    '''
    info = {}
    cursor = connect().cursor()
    cursor.execute('SHOW STATUS')
    for _ in xrange(cursor.rowcount):
        entry = cursor.fetchone()
        info[entry[0]] = entry[1]
    return info
|
||||
|
||||
def version():
    '''
    Return the row produced by a SELECT VERSION() query.

    CLI Example:
    salt '*' mysql.version
    '''
    cursor = connect().cursor()
    cursor.execute('SELECT VERSION()')
    return cursor.fetchone()
|
||||
|
|
@ -1,128 +0,0 @@
|
|||
'''
|
||||
Module for gathering and managing network information
|
||||
'''
|
||||
import subprocess
|
||||
import socket
|
||||
from string import ascii_letters, digits
|
||||
|
||||
def _sanitize_host(host):
|
||||
return "".join([
|
||||
c for c in host[0:255] if c in (ascii_letters + digits + '.')
|
||||
])
|
||||
|
||||
def ping(host):
    '''
    Ping a host four times and return the raw command output.

    CLI Example:
    salt '*' network.ping archlinux.org
    '''
    proc = subprocess.Popen(
        'ping -c 4 %s' % _sanitize_host(host),
        shell=True,
        stdout=subprocess.PIPE)
    return proc.communicate()[0]
|
||||
|
||||
def netstat():
    '''
    Return information on open ports and connection states, parsed from
    ``netstat -tulpnea``.

    CLI Example:
    salt '*' network.netstat
    '''
    results = []
    output = subprocess.Popen('netstat -tulpnea',
                              shell=True,
                              stdout=subprocess.PIPE).communicate()[0]
    for line in output.split('\n'):
        if not line.count(' '):
            continue
        fields = line.split()
        if line.startswith('tcp'):
            # tcp rows carry a state column
            results.append({
                'proto': fields[0],
                'recv-q': fields[1],
                'send-q': fields[2],
                'local-address': fields[3],
                'remote-address': fields[4],
                'state': fields[5],
                'user': fields[6],
                'inode': fields[7],
                'program': fields[8],
            })
        if line.startswith('udp'):
            # udp rows have no state column
            results.append({
                'proto': fields[0],
                'recv-q': fields[1],
                'send-q': fields[2],
                'local-address': fields[3],
                'remote-address': fields[4],
                'user': fields[5],
                'inode': fields[6],
                'program': fields[7],
            })
    return results
|
||||
|
||||
def traceroute(host):
    '''
    Perform a traceroute to a 3rd party host and return a list of dicts,
    one per hop.

    CLI Example:
    salt '*' network.traceroute archlinux.org
    '''
    cmd = 'traceroute %s' % _sanitize_host(host)
    ret = []
    out = subprocess.Popen(cmd,
                           shell=True,
                           stdout=subprocess.PIPE).communicate()[0].split('\n')
    for line in out:
        if not line.count(' '):
            continue
        if line.startswith('traceroute'):
            # header line
            continue
        comps = line.split()
        if len(comps) < 9:
            # ROBUSTNESS FIX: timed-out hops ("* * *") and other short
            # lines lack the full field set and used to raise IndexError
            continue
        result = {
            'count': comps[0],
            'hostname': comps[1],
            'ip': comps[2],
            'ping1': comps[3],
            'ms1': comps[4],
            'ping2': comps[5],
            'ms2': comps[6],
            'ping3': comps[7],
            'ms3': comps[8],
        }
        ret.append(result)
    return ret
|
||||
|
||||
def dig(host):
    '''
    Run a DNS lookup with dig and return the raw command output.

    CLI Example:
    salt '*' network.dig archlinux.org
    '''
    proc = subprocess.Popen(
        'dig %s' % _sanitize_host(host),
        shell=True,
        stdout=subprocess.PIPE)
    return proc.communicate()[0]
|
||||
|
||||
def isportopen(host, port):
    '''
    Check a TCP port on a remote host.

    Returns False when the port is outside 1-65535; otherwise the errno
    result of connect_ex() (0 means the connection succeeded, i.e. the
    port is open).

    CLI Example:
    salt '*' network.isportopen 127.0.0.1 22
    '''
    if not (1 <= int(port) <= 65535):
        return False

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex((_sanitize_host(host), int(port)))
    finally:
        # BUG FIX: the socket was never closed, leaking one file
        # descriptor per call
        sock.close()
|
|
@ -1,185 +0,0 @@
|
|||
'''
|
||||
A module to wrap pacman calls, since Arch is the best
|
||||
(https://wiki.archlinux.org/index.php/Arch_is_the_best)
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
|
||||
def __virtual__():
    '''
    Load this module as ``pkg`` only when the os grain is Arch.
    '''
    if __grains__['os'] == 'Arch':
        return 'pkg'
    return False
|
||||
|
||||
def _list_removed(old, new):
|
||||
'''
|
||||
List the packages which have been removed between the two package objects
|
||||
'''
|
||||
pkgs = []
|
||||
for pkg in old:
|
||||
if not new.has_key(pkg):
|
||||
pkgs.append(pkg)
|
||||
return pkgs
|
||||
|
||||
def available_version(name):
    '''
    Return the version of the named package available in the repository.

    CLI Example:
    salt '*' pkg.available_version <package name>
    '''
    cmd = 'pacman -Sp --print-format %v {0}'.format(name)
    return __salt__['cmd.run'](cmd)
|
||||
|
||||
def version(name):
    '''
    Return the installed version of the named package, or an empty
    string when it is not installed.

    CLI Example:
    salt '*' pkg.version <package name>
    '''
    # dict.get replaces the deprecated has_key()/else dance
    return list_pkgs().get(name, '')
|
||||
|
||||
def list_pkgs():
    '''
    List installed packages as {'<package_name>': '<version>'}.

    CLI Example:
    salt '*' pkg.list_pkgs
    '''
    installed = {}
    output = subprocess.Popen('pacman -Q',
                              shell=True,
                              stdout=subprocess.PIPE).communicate()[0]
    for entry in output.split('\n'):
        if not entry.count(' '):
            continue
        fields = entry.split()
        installed[fields[0]] = fields[1]
    return installed
|
||||
|
||||
def refresh_db():
    '''
    Run ``pacman -Sy`` and report which databases were refreshed:
    {'<database name>': bool}

    CLI Example:
    salt '*' pkg.refresh_db
    '''
    ret = {}
    output = subprocess.Popen('pacman -Sy',
                              shell=True,
                              stdout=subprocess.PIPE).communicate()[0]
    for line in output.split('\n'):
        if line.strip().startswith('::'):
            # progress/info lines
            continue
        if not line:
            continue
        key = line.strip().split()[0]
        if line.count('is up to date'):
            ret[key] = False
        elif line.count('downloading'):
            ret[key] = True
    return ret
|
||||
|
||||
def install(name, refresh=False):
    '''
    Install the named package with ``pacman -S``; with refresh=True sync
    the database first (``pacman -Syu``).

    Returns a dict of added/changed packages:
    {'<package>': {'old': '<old-version>', 'new': '<new-version>'}}

    CLI Example:
    salt '*' pkg.install <package name>
    '''
    old = list_pkgs()
    cmd = 'pacman -S --noprogressbar --noconfirm ' + name
    if refresh:
        cmd = 'pacman -Syu --noprogressbar --noconfirm ' + name
    subprocess.call(cmd, shell=True)
    new = list_pkgs()
    pkgs = {}
    for npkg in new:
        if npkg in old:  # has_key() is deprecated
            if old[npkg] == new[npkg]:
                # no change in the package
                continue
            # the package was here before and the version has changed
            pkgs[npkg] = {'old': old[npkg],
                          'new': new[npkg]}
        else:
            # the package is freshly installed
            pkgs[npkg] = {'old': '',
                          'new': new[npkg]}
    return pkgs
|
||||
|
||||
def upgrade():
    '''
    Run a full system upgrade (``pacman -Syu``).

    Returns a dict of added/changed packages:
    {'<package>': {'old': '<old-version>', 'new': '<new-version>'}}

    CLI Example:
    salt '*' pkg.upgrade
    '''
    old = list_pkgs()
    subprocess.call('pacman -Syu --noprogressbar --noconfirm ', shell=True)
    new = list_pkgs()
    pkgs = {}
    for npkg in new:
        if npkg in old:  # has_key() is deprecated
            if old[npkg] == new[npkg]:
                # no change in the package
                continue
            # the package was here before and the version has changed
            pkgs[npkg] = {'old': old[npkg],
                          'new': new[npkg]}
        else:
            # the package is freshly installed
            pkgs[npkg] = {'old': '',
                          'new': new[npkg]}
    return pkgs
|
||||
|
||||
def remove(name):
    '''
    Remove a single package with ``pacman -R`` and return the list of
    packages that were removed.

    CLI Example:
    salt '*' pkg.remove <package name>
    '''
    before = list_pkgs()
    subprocess.call('pacman -R --noprogressbar --noconfirm ' + name,
                    shell=True)
    after = list_pkgs()
    return _list_removed(before, after)
|
||||
|
||||
def purge(name):
    '''
    Recursively remove a package and all dependencies which were
    installed with it (``pacman -Rsc``) and return the list of removed
    packages.

    CLI Example:
    salt '*' pkg.purge <package name>
    '''
    old = list_pkgs()
    # BUG FIX: the command ran plain `-R`, making purge() identical to
    # remove(); the documented recursive cascade remove is -Rsc
    cmd = 'pacman -Rsc --noprogressbar --noconfirm ' + name
    subprocess.call(cmd, shell=True)
    new = list_pkgs()
    return _list_removed(old, new)
|
152
debian/salt/usr/share/pyshared/salt/modules/ps.py
vendored
152
debian/salt/usr/share/pyshared/salt/modules/ps.py
vendored
|
@ -1,152 +0,0 @@
|
|||
'''
|
||||
A salt interface to psutil, a system and process library.
|
||||
See http://code.google.com/p/psutil.
|
||||
'''
|
||||
import time
|
||||
|
||||
import psutil
|
||||
|
||||
def top(num_processes=5, interval=3):
    '''
    Return a list of the top CPU consuming processes over the interval.

    num_processes -- return the top N CPU consuming processes
    interval      -- number of seconds to sample CPU usage over

    Each entry carries 'cmd', 'pid', 'create_time' plus flattened
    'cpu.*' and 'mem.*' counters.
    '''
    result = []
    start_usage = {}
    # First pass: record cumulative (user + system) CPU time per process
    # NOTE: get_pid_list/get_cpu_times and attribute-style cmdline/name
    # are the legacy psutil API — verify against the pinned psutil version
    for pid in psutil.get_pid_list():
        p = psutil.Process(pid)
        user, sys = p.get_cpu_times()
        start_usage[p] = user + sys
    # Sleep so the second sample measures usage across the interval
    time.sleep(interval)
    usage = set()
    for p, start in start_usage.iteritems():
        user, sys = p.get_cpu_times()
        now = user + sys
        diff = now - start
        # (delta, process) tuples sort by CPU consumed over the interval
        usage.add((diff,p))

    # Walk from the biggest consumer down, stopping after num_processes
    for i, (diff, p) in enumerate(reversed(sorted(usage))):
        if num_processes and i >= num_processes:
            break
        # Kernel threads have an empty cmdline; fall back to the name
        if len(p.cmdline) == 0:
            cmdline = [p.name]
        else:
            cmdline = p.cmdline
        info = {'cmd': cmdline,
                'pid': p.pid,
                'create_time': p.create_time}
        # Flatten the cpu/memory namedtuples into dotted keys
        for k, v in p.get_cpu_times()._asdict().iteritems():
            info['cpu.' + k] = v
        for k, v in p.get_memory_info()._asdict().iteritems():
            info['mem.' + k] = v
        result.append(info)

    return result
|
||||
|
||||
def get_pid_list():
    '''
    Return the PIDs of all running processes.
    '''
    return psutil.get_pid_list()
|
||||
|
||||
def cpu_percent(interval=0.1, per_cpu=False):
    '''
    Return the percentage of time the CPU is busy.

    interval -- seconds to sample CPU usage over
    per_cpu  -- if True return one percentage per CPU, otherwise a
    single aggregate number
    '''
    if per_cpu:
        return [pct for pct in psutil.cpu_percent(interval, True)]
    return psutil.cpu_percent(interval)
|
||||
|
||||
def cpu_times(per_cpu=False):
    '''
    Return the share of time the CPU spends in each state
    (user, system, idle, nice, iowait, irq, softirq).

    per_cpu -- if True return one dict per CPU, otherwise an aggregate
    '''
    if per_cpu:
        return [dict(times._asdict()) for times in psutil.cpu_times(True)]
    return dict(psutil.cpu_times(per_cpu)._asdict())
|
||||
|
||||
def phymem_usage():
    '''
    Return free and available physical memory as a dict.
    '''
    return dict(psutil.phymem_usage()._asdict())
|
||||
|
||||
def virtmem_usage():
    '''
    Return free and available memory, physical and virtual, as a dict.
    '''
    return dict(psutil.virtmem_usage()._asdict())
|
||||
|
||||
def cached_phymem():
    '''
    Return the amount of cached memory.
    '''
    return psutil.cached_phymem()
|
||||
|
||||
def phymem_buffers():
    '''
    Return the amount of physical memory used by buffers.
    '''
    return psutil.phymem_buffers()
|
||||
|
||||
def disk_partitions(all=False):
    '''
    Return disk partitions as a list of dicts (device, mount point,
    filesystem type).

    all -- if False only local physical partitions (hard disk, USB,
    CD/DVD); if True, every filesystem
    '''
    return [dict(part._asdict()) for part in psutil.disk_partitions(all)]
|
||||
|
||||
def disk_usage(path):
    '''
    Return total, used and free space for the given path as a dict.
    '''
    return dict(psutil.disk_usage(path)._asdict())
|
||||
|
||||
def disk_partition_usage(all=False):
    '''
    Return disk partitions with usage statistics merged into each entry.
    '''
    partitions = disk_partitions(all)
    for part in partitions:
        part.update(disk_usage(part['mountpoint']))
    return partitions
|
||||
|
||||
def TOTAL_PHYMEM():
    '''
    Return total physical memory in bytes.
    '''
    return psutil.TOTAL_PHYMEM
|
||||
|
||||
def NUM_CPUS():
    '''
    Return the CPU count.
    '''
    return psutil.NUM_CPUS
|
||||
|
||||
def BOOT_TIME():
    '''
    Return the system boot time as seconds since the epoch.
    '''
    return psutil.BOOT_TIME
|
|
@ -1,53 +0,0 @@
|
|||
'''
|
||||
Publish a command from a minion to a target
|
||||
'''
|
||||
# Import salt libs
|
||||
import salt.crypt
|
||||
# Import ZeroMQ
|
||||
import zmq
|
||||
|
||||
def _get_socket():
    '''
    Build a ZeroMQ REQ socket connected to the master URI and return it.
    '''
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.connect(__opts__['master_uri'])
    return sock
|
||||
|
||||
|
||||
def publish(tgt, fun, arg, expr_form='glob', returner=''):
    '''
    Publish a command from the minion out to other minions, publications need
    to be enabled on the Salt master and the minion needs to have permission
    to publish the command. The Salt master will also prevent a recursive
    publication loop, this means that a minion cannot command another minion
    to command another minion as that would create an infinite command loop.

    The arguments sent to the minion publish function are separated with
    commas. This means that a minion who is executing a command with multiple
    args it will look like this:

    salt system.example.com publish.publish '*' user.add 'foo,1020,1020'

    CLI Example:
    salt system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
    '''
    # NOTE(review): expr_form is accepted but never added to the load
    # sent to the master — confirm whether targeting by form is intended
    if fun == 'publish.publish':
        # Refuse to publish a publish call: first guard against the
        # recursive loop described in the docstring
        # Need to log something here
        return {}
    auth = salt.crypt.SAuth(__opts__)
    tok = auth.gen_token('salt')
    payload = {'enc': 'aes'}
    load = {
            'cmd': 'minion_publish',
            'fun': fun,
            'arg': arg.split(','),
            'tgt': tgt,
            'ret': returner,
            'tok': tok,
            'id': __opts__['id']}
    # The load is AES-encrypted before it goes onto the wire
    payload['load'] = auth.crypticle.dumps(load)
    socket = _get_socket()
    socket.send_pyobj(payload)
    # Decrypt the master's reply before handing it back
    return auth.crypticle.loads(socket.recv_pyobj())
|
|
@ -1,25 +0,0 @@
|
|||
'''
|
||||
Execute puppet routines
|
||||
'''
|
||||
|
||||
def _check_puppet():
    '''
    Return True when the puppet executable is available on this minion.
    '''
    # Deliberately not a __virtual__ gate: puppet may be installed after
    # the minion starts, and a virtual module would need a restart
    return __salt__['cmd.has_exec']('puppet')
|
||||
|
||||
def run():
    '''
    Execute a puppet test run and return the cmd.run_all dict (stdout,
    stderr, return code, ...); an empty dict when puppet is missing.

    CLI Example:
    salt '*' puppet.run
    '''
    if not _check_puppet():
        return {}
    return __salt__['cmd.run_all']('puppetd --test')
|
|
@ -1,63 +0,0 @@
|
|||
'''
|
||||
Top level package command wrapper, used to translate the os detected by the
|
||||
grains to the correct service manager
|
||||
'''
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
# Map of os grain value -> directory holding that distribution's init
# scripts; used below to build the service control commands.
grainmap = {
    'Arch': '/etc/rc.d',
    'Fedora': '/etc/init.d',
    'RedHat': '/etc/init.d',
    'Debian': '/etc/init.d',
    'Ubuntu': '/etc/init.d',
}
|
||||
|
||||
def start(name):
    '''
    Start the named service via its init script; True when the script
    exits 0.

    CLI Example:
    salt '*' service.start <service name>
    '''
    script = os.path.join(grainmap[__grains__['os']], name + ' start')
    return not subprocess.call(script, shell=True)
|
||||
|
||||
def stop(name):
    '''
    Stop the named service via its init script; True when the script
    exits 0.

    CLI Example:
    salt '*' service.stop <service name>
    '''
    script = os.path.join(grainmap[__grains__['os']], name + ' stop')
    return not subprocess.call(script, shell=True)
|
||||
|
||||
def restart(name):
    '''
    Restart the named service via its init script; True when the script
    exits 0.

    CLI Example:
    salt '*' service.restart <service name>
    '''
    script = os.path.join(grainmap[__grains__['os']], name + ' restart')
    return not subprocess.call(script, shell=True)
|
||||
|
||||
def status(name, sig=None):
    '''
    Return the PID(s) of the named service (empty string when it is not
    running), found by grepping ps output for ``sig`` — which defaults
    to the service name.

    CLI Example:
    salt '*' service.status <service name> [service signature]
    '''
    if not sig:
        sig = name
    cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
            __grains__, sig)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    return proc.communicate()[0].strip()
|
|
@ -1,53 +0,0 @@
|
|||
'''
|
||||
Manage the shadow file
|
||||
'''
|
||||
# Import python libs
|
||||
import os
|
||||
import spwd
|
||||
|
||||
def info(name):
    '''
    Return the shadow entry for the named user as a dict.

    CLI Example:
    salt '*' shadow.info root
    '''
    entry = spwd.getspnam(name)
    return {
        'name': entry.sp_nam,
        'pwd': entry.sp_pwd,
        'lstchg': entry.sp_lstchg,
        'min': entry.sp_min,
        'max': entry.sp_max,
        'warn': entry.sp_warn,
        'inact': entry.sp_inact,
        'expire': entry.sp_expire,
    }
|
||||
|
||||
def set_password(name, password):
    '''
    Set the password for a named user. The password must be a properly
    defined hash; one can be generated with:
    openssl passwd -1 <plaintext password>

    Returns True when the shadow entry now carries the given hash.

    CLI Example:
    salt '*' shadow.set_password root $1$UYCIxa628.9qXjpQCjM4a..
    '''
    s_file = '/etc/shadow'
    ret = {}
    if not os.path.isfile(s_file):
        return ret
    lines = []
    for line in open(s_file, 'rb').readlines():
        comps = line.strip().split(':')
        if not comps[0] == name:
            lines.append(line)
            continue
        comps[1] = password
        # BUG FIX: strip() removed the trailing newline, so the rebuilt
        # entry fused with the next line and corrupted /etc/shadow
        lines.append(':'.join(comps) + '\n')
    open(s_file, 'w+').writelines(lines)
    uinfo = info(name)
    if uinfo['pwd'] == password:
        return True
    return False
|
||||
|
154
debian/salt/usr/share/pyshared/salt/modules/ssh.py
vendored
154
debian/salt/usr/share/pyshared/salt/modules/ssh.py
vendored
|
@ -1,154 +0,0 @@
|
|||
'''
|
||||
Manage client ssh components
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
def _refine_enc(enc):
|
||||
'''
|
||||
Return the properly formatted ssh value for the authorized encryption key
|
||||
type. If the type is not found, return ssh-rsa, the ssh default.
|
||||
'''
|
||||
rsa = ['r', 'rsa', 'ssh-rsa']
|
||||
dss = ['d', 'dsa', 'dss', 'ssh-dss']
|
||||
if rsa.count(enc):
|
||||
return 'ssh-rsa'
|
||||
elif dss.count(enc):
|
||||
return 'ssh-dss'
|
||||
else:
|
||||
return 'ssh-rsa'
|
||||
|
||||
def _format_auth_line(
|
||||
key,
|
||||
enc,
|
||||
comment,
|
||||
options):
|
||||
line = ''
|
||||
if options:
|
||||
line += '{0} '.format(','.join(options))
|
||||
line += '{0} {1} {2}'.format(enc, key, comment)
|
||||
return line
|
||||
|
||||
def _replace_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Rewrite the user's authorized_keys file, replacing the line carrying
    ``key`` with a freshly formatted entry.
    '''
    # BUG FIX: a mutable default argument ([]) is shared across calls
    if options is None:
        options = []
    auth_line = _format_auth_line(
            key,
            enc,
            comment,
            options)
    lines = []
    uinfo = __salt__['user.info'](user)
    full = os.path.join(uinfo['home'], config)
    for line in open(full, 'r').readlines():
        if line.startswith('#'):
            # Commented line, keep verbatim
            lines.append(line)
            continue
        comps = line.split()
        if len(comps) < 2:
            # Not a valid entry, keep verbatim
            lines.append(line)
            continue
        # Entries with an options prefix carry the key in field 2
        key_ind = 1
        if not comps[0].startswith('ssh-'):
            key_ind = 2
        if comps[key_ind] == key:
            lines.append(auth_line)
        else:
            lines.append(line)
    open(full, 'w+').writelines(lines)
|
||||
|
||||
def auth_keys(user, config='.ssh/authorized_keys'):
    '''
    Return the authorized keys for the specified user as
    {key: {'enc': ..., 'comment': ..., 'options': [...]}}.

    CLI Example:
    salt '*' ssh.auth_keys root
    '''
    keys = {}
    uinfo = __salt__['user.info'](user)
    path = os.path.join(uinfo['home'], config)
    if not os.path.isfile(path):
        return {}
    for line in open(path, 'r').readlines():
        if line.startswith('#'):
            # Commented line
            continue
        fields = line.split()
        if len(fields) < 2:
            # Not a valid line
            continue
        if fields[0].startswith('ssh-'):
            options = []
            enc = fields[0]
            key = fields[1]
            comment = ' '.join(fields[2:])
        else:
            # Line has a leading options field
            options = fields[0].split(',')
            enc = fields[1]
            key = fields[2]
            comment = ' '.join(fields[3:])
        keys[key] = {'enc': enc,
                     'comment': comment,
                     'options': options}
    return keys
|
||||
|
||||
def set_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Add a key to the authorized_keys file.

    Returns 'new' when the key was appended, 'replace' when an existing
    entry was rewritten, 'no change' when it already matched.

    CLI Example:
    salt '*' ssh.set_auth_key <user> <key> dsa '[]' .ssh/authorized_keys
    '''
    # BUG FIX: a mutable default argument ([]) is shared across calls
    if options is None:
        options = []
    enc = _refine_enc(enc)
    replace = False
    uinfo = __salt__['user.info'](user)
    current = auth_keys(user, config)
    if key in current:  # has_key() is deprecated
        # BUG FIX: auth_keys() maps key -> entry dict; the original read
        # current['options']/current['enc']/current['comment'] directly,
        # raising KeyError whenever the key already existed
        entry = current[key]
        if set(entry['options']) != set(options):
            replace = True
        if entry['enc'] != enc:
            replace = True
        if entry['comment'] != comment:
            if comment:
                replace = True
        if replace:
            _replace_auth_key(
                    user,
                    key,
                    enc,
                    comment,
                    options,
                    config)
            return 'replace'
        return 'no change'
    else:
        auth_line = _format_auth_line(
                key,
                enc,
                comment,
                options)
        open(
            os.path.join(uinfo['home'], config), 'a+').write(
                    '\n{0}'.format(auth_line))
        return 'new'
|
|
@ -1,64 +0,0 @@
|
|||
'''
|
||||
Control the state system on the minion
|
||||
'''
|
||||
|
||||
# Import salt modules
|
||||
import salt.state
|
||||
|
||||
__outputter__ = {
|
||||
'highstate': 'highstate'
|
||||
}
|
||||
def low(data):
    '''
    Execute a single low data call; mostly intended for testing the
    state system.

    CLI Example:
    salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vim"}'
    '''
    st_ = salt.state.State(__opts__)
    errors = st_.verify_data(data)
    if errors:
        return errors
    return st_.call(data)
|
||||
|
||||
def high(data):
    '''
    Execute the compound calls in a single set of high data; mostly
    intended for testing the state system.

    CLI Example:
    salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
    '''
    return salt.state.State(__opts__).call_high(data)
|
||||
|
||||
def template(tem):
    '''
    Execute the information stored in a template file on the minion.

    CLI Example:
    salt '*' state.template '<Path to template on the minion>'
    '''
    return salt.state.State(__opts__).call_template(tem)
|
||||
|
||||
def template_str(tem):
    '''
    Execute the information stored in a template string on the minion.

    CLI Example:
    salt '*' state.template_str '<Template String>'
    '''
    return salt.state.State(__opts__).call_template_str(tem)
|
||||
|
||||
def highstate():
    '''
    Retrieve the state data from the salt master for this minion and
    execute it.

    CLI Example:
    salt '*' state.highstate
    '''
    return salt.state.HighState(__opts__).call_highstate()
|
|
@ -1,374 +0,0 @@
|
|||
'''
|
||||
Module for returning various status data about a minion.
|
||||
These data can be useful for compiling into stats later.
|
||||
'''
|
||||
import fnmatch
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
__opts__ = {}
|
||||
|
||||
def _number(text):
|
||||
'''
|
||||
Convert a string to a number.
|
||||
Returns an integer if the string represents an integer, a floating
|
||||
point number if the string is a real number, or the string unchanged
|
||||
otherwise.
|
||||
'''
|
||||
try:
|
||||
return int(text)
|
||||
except ValueError:
|
||||
try:
|
||||
return float(text)
|
||||
except ValueError:
|
||||
return text
|
||||
|
||||
def custom():
    '''
    Return a custom composite of status data and info for this minion,
    based on the minion config file. An example config might be:

    status.cpustats.custom: [ 'cpu', 'ctxt', 'btime', 'processes' ]

    ...where status refers to status.py, cpustats is the function we get
    our data from, and custom is this function. It is followed by a list
    of keys that we want returned.

    By default nothing is returned. Warning: depending on what you
    include, there can be a LOT here!

    CLI Example:
    salt '*' status.custom
    '''
    ret = {}
    for opt in __opts__:
        keys = opt.split('.')
        if keys[0] != 'status':
            continue
        # SECURITY FIX: look the function up in this module's namespace
        # instead of eval()-ing a string built from config data, which
        # allowed arbitrary code execution via the minion config
        vals = globals()[keys[1]]()
        for item in __opts__[opt]:
            ret[item] = vals[item]
    return ret
|
||||
|
||||
def uptime():
    '''
    Return the output of uptime(1) for this minion.

    CLI Example:
    salt '*' status.uptime
    '''
    proc = subprocess.Popen(['uptime'], stdout=subprocess.PIPE)
    return proc.communicate()[0].strip()
|
||||
|
||||
def loadavg():
    '''
    Return the 1, 5 and 15 minute load averages from /proc/loadavg.

    CLI Example:
    salt '*' status.loadavg
    '''
    fields = open('/proc/loadavg', 'r').read().strip().split()
    return {
        '1-min': _number(fields[0]),
        '5-min': _number(fields[1]),
        '15-min': _number(fields[2]),
    }
|
||||
|
||||
def cpustats():
    '''
    Return the CPU stats for this minion, parsed from /proc/stat.

    The aggregate 'cpu' line and the 'intr'/'softirq' lines get
    structured sub-dicts; every other line is returned as one number.

    CLI Example:
    salt '*' status.cpustats
    '''
    stats = open('/proc/stat', 'r').read().split('\n')
    ret = {}
    for line in stats:
        if not line.count(' '):
            # skip blank/single-token lines
            continue
        comps = line.split()
        if comps[0] == 'cpu':
            # aggregate cpu line: time spent in each state
            ret[comps[0]] = {
                    'user': _number(comps[1]),
                    'nice': _number(comps[2]),
                    'system': _number(comps[3]),
                    'idle': _number(comps[4]),
                    'iowait': _number(comps[5]),
                    'irq': _number(comps[6]),
                    'softirq': _number(comps[7]),
                    'steal': _number(comps[8]),
                    }
        elif comps[0] == 'intr':
            # total interrupt count followed by per-IRQ counts
            ret[comps[0]] = {
                    'total': _number(comps[1]),
                    'irqs' : [_number(x) for x in comps[2:]],
                    }
        elif comps[0] == 'softirq':
            # total softirq count followed by per-softirq counts
            ret[comps[0]] = {
                    'total': _number(comps[1]),
                    'softirqs': [_number(x) for x in comps[2:]],
                    }
        else:
            # remaining lines (ctxt, btime, processes, ...) are scalars
            ret[comps[0]] = _number(comps[1])
    return ret
|
||||
|
||||
def meminfo():
|
||||
'''
|
||||
Return the CPU stats for this minion
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.meminfo
|
||||
'''
|
||||
stats = open('/proc/meminfo', 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
comps[0] = comps[0].replace(':', '')
|
||||
ret[comps[0]] = {
|
||||
'value': comps[1],
|
||||
}
|
||||
if len(comps) > 2:
|
||||
ret[comps[0]]['unit'] = comps[2]
|
||||
return ret
|
||||
|
||||
def cpuinfo():
|
||||
'''
|
||||
Return the CPU info for this minion
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.cpuinfo
|
||||
'''
|
||||
stats = open('/proc/cpuinfo', 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split(':')
|
||||
comps[0] = comps[0].strip()
|
||||
if comps[0] == 'flags':
|
||||
ret[comps[0]] = comps[1].split()
|
||||
else:
|
||||
ret[comps[0]] = comps[1].strip()
|
||||
return ret
|
||||
|
||||
def diskstats():
|
||||
'''
|
||||
Return the disk stats for this minion
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.diskstats
|
||||
'''
|
||||
stats = open('/proc/diskstats', 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[2]] = {
|
||||
'major': _number(comps[0]),
|
||||
'minor': _number(comps[1]),
|
||||
'device': _number(comps[2]),
|
||||
'reads_issued': _number(comps[3]),
|
||||
'reads_merged': _number(comps[4]),
|
||||
'sectors_read': _number(comps[5]),
|
||||
'ms_spent_reading': _number(comps[6]),
|
||||
'writes_completed': _number(comps[7]),
|
||||
'writes_merged': _number(comps[8]),
|
||||
'sectors_written': _number(comps[9]),
|
||||
'ms_spent_writing': _number(comps[10]),
|
||||
'io_in_progress': _number(comps[11]),
|
||||
'ms_spent_in_io': _number(comps[12]),
|
||||
'weighted_ms_spent_in_io': _number(comps[13]),
|
||||
}
|
||||
return ret
|
||||
|
||||
def diskusage( *args ):
|
||||
'''
|
||||
Return the disk usage for this minion
|
||||
|
||||
Usage:
|
||||
salt '*' status.diskusage [paths and/or filesystem types]
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.diskusage # usage for all filesystems
|
||||
salt '*' status.diskusage / /tmp # usage for / and /tmp
|
||||
salt '*' status.diskusage ext? # usage for ext2, ext3, & ext4 filesystems
|
||||
salt '*' status.diskusage / ext? # usage for / and all ext filesystems
|
||||
'''
|
||||
selected = set()
|
||||
fstypes = set()
|
||||
if not args:
|
||||
# select all filesystems
|
||||
fstypes.add('*')
|
||||
else:
|
||||
for arg in args:
|
||||
if arg.startswith('/'):
|
||||
# select path
|
||||
selected.add(arg)
|
||||
else:
|
||||
# select fstype
|
||||
fstypes.add(arg)
|
||||
|
||||
if len(fstypes) > 0:
|
||||
# determine which mount points host the specifed fstypes
|
||||
p = re.compile('|'.join(fnmatch.translate(fstype).format("(%s)")
|
||||
for fstype in fstypes))
|
||||
with open('/proc/mounts', 'r') as fp:
|
||||
for line in fp:
|
||||
comps = line.split()
|
||||
if len(comps) >= 3:
|
||||
mntpt = comps[ 1 ]
|
||||
fstype = comps[ 2 ]
|
||||
if p.match(fstype):
|
||||
selected.add(mntpt)
|
||||
|
||||
# query the filesystems disk usage
|
||||
ret = {}
|
||||
for path in selected:
|
||||
fsstats = os.statvfs( path )
|
||||
blksz = fsstats.f_bsize
|
||||
available = fsstats.f_bavail * blksz
|
||||
total = fsstats.f_blocks * blksz
|
||||
ret[ path ] = { "available" : available,
|
||||
"total" : total }
|
||||
return ret
|
||||
|
||||
def vmstats():
|
||||
'''
|
||||
Return the virtual memory stats for this minion
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.vmstats
|
||||
'''
|
||||
stats = open('/proc/vmstat', 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = _number(comps[1])
|
||||
return ret
|
||||
|
||||
def netstats():
|
||||
'''
|
||||
Return the network stats for this minion
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.netstats
|
||||
'''
|
||||
stats = open('/proc/net/netstat', 'r').read().split('\n')
|
||||
ret = {}
|
||||
headers = ['']
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
comps = line.split()
|
||||
if comps[0] == headers[0]:
|
||||
index = len(headers) - 1
|
||||
row = {}
|
||||
for field in range(index):
|
||||
if field < 1:
|
||||
continue
|
||||
else:
|
||||
row[headers[field]] = _number(comps[field])
|
||||
rowname = headers[0].replace(':', '')
|
||||
ret[rowname] = row
|
||||
else:
|
||||
headers = comps
|
||||
return ret
|
||||
|
||||
def netdev():
|
||||
'''
|
||||
Return the network device stats for this minion
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.netdev
|
||||
'''
|
||||
stats = open('/proc/net/dev', 'r').read().split('\n')
|
||||
ret = {}
|
||||
for line in stats:
|
||||
if not line.count(' '):
|
||||
continue
|
||||
if line.find(':') < 0:
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = {
|
||||
'iface': comps[0],
|
||||
'rx_bytes': _number(comps[1]),
|
||||
'rx_packets': _number(comps[2]),
|
||||
'rx_errs': _number(comps[3]),
|
||||
'rx_drop': _number(comps[4]),
|
||||
'rx_fifo': _number(comps[5]),
|
||||
'rx_frame': _number(comps[6]),
|
||||
'rx_compressed': _number(comps[7]),
|
||||
'rx_multicast': _number(comps[8]),
|
||||
'tx_bytes': _number(comps[9]),
|
||||
'tx_packets': _number(comps[10]),
|
||||
'tx_errs': _number(comps[11]),
|
||||
'tx_drop': _number(comps[12]),
|
||||
'tx_fifo': _number(comps[13]),
|
||||
'tx_colls': _number(comps[14]),
|
||||
'tx_carrier': _number(comps[15]),
|
||||
'tx_compressed': _number(comps[16]),
|
||||
}
|
||||
return ret
|
||||
|
||||
def w():
|
||||
'''
|
||||
Return a list of logged in users for this minion, using the w command
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.w
|
||||
'''
|
||||
users = subprocess.Popen(['w -h'],
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE).communicate()[0].split('\n')
|
||||
user_list = []
|
||||
for row in users:
|
||||
if not row.count(' '):
|
||||
continue
|
||||
comps = row.split()
|
||||
rec = {
|
||||
'user': comps[0],
|
||||
'tty': comps[1],
|
||||
'login': comps[2],
|
||||
'idle': comps[3],
|
||||
'jcpu': comps[4],
|
||||
'pcpu': comps[5],
|
||||
'what': ' '.join(comps[6:]),
|
||||
}
|
||||
user_list.append( rec )
|
||||
return user_list
|
||||
|
||||
def all_status():
|
||||
'''
|
||||
Return a composite of all status data and info for this minoon.
|
||||
Warning: There is a LOT here!
|
||||
|
||||
CLI Example:
|
||||
salt '*' status.all_status
|
||||
'''
|
||||
return {
|
||||
'cpuinfo': cpuinfo(),
|
||||
'cpustats': cpustats(),
|
||||
'diskstats': diskstats(),
|
||||
'loadavg': loadavg(),
|
||||
'meminfo': meminfo(),
|
||||
'netdev': netdev(),
|
||||
'netstats': netstats(),
|
||||
'uptime': uptime(),
|
||||
'vmstats': vmstats(),
|
||||
'w': w(),
|
||||
}
|
||||
|
|
@ -1,52 +0,0 @@
|
|||
'''
Module for viewing and modifying sysctl parameters
'''
import subprocess


def show():
    '''
    Return a dict of all sysctl parameters for this minion

    CLI Example:
    salt '*' sysctl.show
    '''
    cmd = 'sysctl -a'
    ret = {}
    out = subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0].split('\n')
    for line in out:
        if not line.count(' '):
            continue
        comps = line.split(' = ')
        # Guard against rows without a " = " separator (e.g. error output)
        if len(comps) < 2:
            continue
        ret[comps[0]] = comps[1]
    return ret


def get(name):
    '''
    Return the value of a single sysctl parameter for this minion

    CLI Example:
    salt '*' sysctl.get net.ipv4.ip_forward
    '''
    cmd = 'sysctl -n {0}'.format(name)
    out = subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0]
    # Return the whole value; indexing out[0] would yield only the
    # first character of the output.
    return out.strip()


def assign(name, value):
    '''
    Assign a single sysctl parameter for this minion, returning the
    {name: value} pair reported back by sysctl -w

    CLI Example:
    salt '*' sysctl.assign net.ipv4.ip_forward 1
    '''
    cmd = 'sysctl -w {0}={1}'.format(name, value)
    ret = {}
    out = subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0].strip()
    comps = out.split(' = ')
    ret[comps[0]] = comps[1]
    return ret
|
108
debian/salt/usr/share/pyshared/salt/modules/test.py
vendored
108
debian/salt/usr/share/pyshared/salt/modules/test.py
vendored
|
@ -1,108 +0,0 @@
|
|||
'''
Module for running arbitrary tests
'''

import time

# Load in default options for the module; the minion config can override them
__opts__ = {
    'test.foo': 'foo'
}
# Load the outputters for the module
__outputter__ = {
    'outputter': 'txt'
}


def echo(text):
    '''
    Return a string - used for testing the connection

    CLI Example:
    salt '*' test.echo 'foo bar baz quo qux'
    '''
    # print() call form works on both Python 2 and Python 3
    print('Echo got called!')
    return text


def ping():
    '''
    Just used to make sure the minion is up and responding
    Return True

    CLI Example:
    salt '*' test.ping
    '''
    return True


def conf_test():
    '''
    Return the value for test.foo in the minion configuration file, or return
    the default value

    CLI Example:
    salt '*' test.conf_test
    '''
    return __opts__['test.foo']


def get_opts():
    '''
    Return the configuration options passed to this minion

    CLI Example:
    salt '*' test.get_opts
    '''
    return __opts__


def cross_test(func, args=None):
    '''
    Execute a minion function via the __salt__ object in the test module, used
    to verify that the minion functions can be called via the __salt__ module

    CLI Example:
    salt '*' test.cross_test file.gid_to_group 0
    '''
    # Avoid the shared-mutable-default-argument pitfall: a list default
    # would be reused across calls.
    if args is None:
        args = []
    return __salt__[func](*args)


def fib(num):
    '''
    Return a Fibonacci sequence up to the passed number, and the time it took
    to compute in seconds. Used for performance tests

    CLI Example:
    salt '*' test.fib 3
    '''
    start = time.time()
    a, b = 0, 1
    ret = [0]
    while b < num:
        ret.append(b)
        a, b = b, a + b
    return ret, time.time() - start


def collatz(start):
    '''
    Execute the collatz conjecture from the passed starting number, returns
    the sequence and the time it took to compute. Used for performance tests.

    CLI Example:
    salt '*' test.collatz 3
    '''
    begin = time.time()
    steps = []
    while start != 1:
        steps.append(start)
        if start > 1:
            if start % 2 == 0:
                # Floor division keeps the sequence integral on Python 3 too
                start = start // 2
            else:
                start = start * 3 + 1
    return steps, time.time() - begin


def outputter(data):
    '''
    Test the outputter, pass in data to return

    CLI Example:
    salt '*' test.outputter foobar
    '''
    return data
|
||||
|
|
@ -1,197 +0,0 @@
|
|||
'''
Manage users with the useradd command
'''

import pwd
import grp


def __virtual__():
    '''
    Set the user module if the kernel is Linux
    '''
    return 'user' if __grains__['kernel'] == 'Linux' else False


def add(name,
        uid=None,
        gid=None,
        groups=None,
        home=False,
        shell='/bin/false'):
    '''
    Add a user to the minion; returns True when useradd exits 0

    CLI Example:
    salt '*' user.add name <uid> <gid> <groups> <home> <shell>
    '''
    cmd = 'useradd -s {0} '.format(shell)
    if uid:
        cmd += '-u {0} '.format(uid)
    if gid:
        cmd += '-g {0} '.format(gid)
    if groups:
        cmd += '-G {0} '.format(groups)
    if home:
        # -m creates the directory, -d sets its location
        cmd += '-m -d {0} '.format(home)
    cmd += name
    ret = __salt__['cmd.run_all'](cmd)

    # retcode 0 means success
    return not ret['retcode']


def delete(name, remove=False, force=False):
    '''
    Remove a user from the minion; returns True when userdel exits 0

    CLI Example:
    salt '*' user.delete name True True
    '''
    cmd = 'userdel '
    if remove:
        # Also remove the home directory and mail spool
        cmd += '-r '
    if force:
        cmd += '-f '
    cmd += name

    ret = __salt__['cmd.run_all'](cmd)

    return not ret['retcode']


def getent():
    '''
    Return the list of all info for all users

    CLI Example:
    salt '*' user.getent
    '''
    ret = []
    for data in pwd.getpwall():
        ret.append(info(data.pw_name))
    return ret


def chuid(name, uid):
    '''
    Change the uid for a named user; returns True if the uid is (now) the
    requested value

    CLI Example:
    salt '*' user.chuid foo 4376
    '''
    pre_info = info(name)
    if uid == pre_info['uid']:
        return True
    cmd = 'usermod -u {0} {1}'.format(uid, name)
    __salt__['cmd.run'](cmd)
    post_info = info(name)
    if post_info['uid'] != pre_info['uid']:
        if post_info['uid'] == uid:
            return True
    return False


def chgid(name, gid):
    '''
    Change the default group of the user

    CLI Example:
    salt '*' user.chgid foo 4376
    '''
    pre_info = info(name)
    if gid == pre_info['gid']:
        return True
    cmd = 'usermod -g {0} {1}'.format(gid, name)
    __salt__['cmd.run'](cmd)
    post_info = info(name)
    if post_info['gid'] != pre_info['gid']:
        if post_info['gid'] == gid:
            return True
    return False


def chshell(name, shell):
    '''
    Change the default shell of the user

    CLI Example:
    salt '*' user.chshell foo /bin/zsh
    '''
    pre_info = info(name)
    if shell == pre_info['shell']:
        return True
    cmd = 'usermod -s {0} {1}'.format(shell, name)
    __salt__['cmd.run'](cmd)
    post_info = info(name)
    if post_info['shell'] != pre_info['shell']:
        if post_info['shell'] == shell:
            return True
    return False


def chhome(name, home, persist=False):
    '''
    Change the home directory of the user, pass true for persist to copy files
    to the new home dir

    CLI Example:
    salt '*' user.chhome foo /home/users/foo True
    '''
    pre_info = info(name)
    if home == pre_info['home']:
        return True
    cmd = 'usermod -d {0} '.format(home)
    if persist:
        # -m moves the contents of the old home directory
        cmd += ' -m '
    cmd += name
    __salt__['cmd.run'](cmd)
    post_info = info(name)
    if post_info['home'] != pre_info['home']:
        if post_info['home'] == home:
            return True
    return False


def chgroups(name, groups, append=False):
    '''
    Change the groups this user belongs to, add append to append the specified
    groups

    CLI Example:
    salt '*' user.chgroups foo wheel,root True
    '''
    # Accept either a comma separated string or a list of group names
    if isinstance(groups, str):
        groups = groups.split(',')
    ugrps = set(list_groups(name))
    if ugrps == set(groups):
        return True
    cmd = 'usermod -G {0} {1} '.format(','.join(groups), name)
    if append:
        cmd += '-a'
    __salt__['cmd.run'](cmd)
    agrps = set(list_groups(name))
    # NOTE(review): this reports True when groups were *dropped* by the
    # change; the success condition looks inverted — confirm upstream intent.
    if ugrps.difference(agrps):
        return True
    return False


def info(name):
    '''
    Return user information as a dict (name, passwd, uid, gid, home, shell,
    groups)

    CLI Example:
    salt '*' user.info root
    '''
    ret = {}
    data = pwd.getpwnam(name)
    ret['name'] = data.pw_name
    ret['passwd'] = data.pw_passwd
    ret['uid'] = data.pw_uid
    ret['gid'] = data.pw_gid
    ret['home'] = data.pw_dir
    ret['shell'] = data.pw_shell
    ret['groups'] = list_groups(name)
    return ret


def list_groups(name):
    '''
    Return a sorted list of groups the named user belongs to

    CLI Example:
    salt '*' user.groups foo
    '''
    ugrp = set()
    for group in grp.getgrall():
        if group.gr_mem.count(name):
            ugrp.add(group.gr_name)
    return sorted(list(ugrp))
|
450
debian/salt/usr/share/pyshared/salt/modules/virt.py
vendored
450
debian/salt/usr/share/pyshared/salt/modules/virt.py
vendored
|
@ -1,450 +0,0 @@
|
|||
'''
Work with virtual machines managed by libvirt
'''
# Special Thanks to Michael Dehann, many of the concepts, and a few structures
# of his in the virt func module have been used


# Import Python Libs
import os
import StringIO
from xml.dom import minidom
import subprocess
import shutil

# Import libvirt
import libvirt

# Import Third Party Libs
import yaml

# Map libvirt domain state codes to human readable names
VIRT_STATE_NAME_MAP = {
    0: "running",
    1: "running",
    2: "running",
    3: "paused",
    4: "shutdown",
    5: "shutdown",
    6: "crashed"
}


def __get_conn():
    '''
    Detects what type of dom this node is and attempts to connect to the
    correct hypervisor via libvirt.
    '''
    # This only supports kvm right now, it needs to be expanded to support
    # all vm layers supported by libvirt
    return libvirt.open("qemu:///system")


def _get_dom(vm_):
    '''
    Return a domain object for the named vm

    Raises a plain Exception if the vm is not present on this node.
    '''
    conn = __get_conn()
    if not list_vms().count(vm_):
        raise Exception('The specified vm is not present')
    return conn.lookupByName(vm_)


def _libvirt_creds():
    '''
    Returns the user and group that the disk images should be owned by,
    as configured in /etc/libvirt/qemu.conf
    '''
    g_cmd = 'grep group /etc/libvirt/qemu.conf'
    u_cmd = 'grep user /etc/libvirt/qemu.conf'
    # The config lines look like: user = "root" — grab the quoted value
    group = subprocess.Popen(g_cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0].split('"')[1]
    user = subprocess.Popen(u_cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0].split('"')[1]
    return {'user': user, 'group': group}


def list_vms():
    '''
    Return a list of virtual machine names on the minion

    CLI Example:
    salt '*' virt.list_vms
    '''
    conn = __get_conn()
    vms = []
    for id_ in conn.listDomainsID():
        vms.append(conn.lookupByID(id_).name())
    return vms


def vm_info():
    '''
    Return detailed information about the vms on this hyper in a dict:

    {'cpu': <int>,
     'maxMem': <int>,
     'mem': <int>,
     'state': '<state>',
     'cputime' <int>}

    CLI Example:
    salt '*' virt.vm_info
    '''
    info = {}
    for vm_ in list_vms():
        dom = _get_dom(vm_)
        # dom.info() returns (state, maxMem, mem, nrVirtCpu, cpuTime)
        raw = dom.info()
        info[vm_] = {
            'state': VIRT_STATE_NAME_MAP.get(raw[0], 'unknown'),
            'maxMem': int(raw[1]),
            'mem': int(raw[2]),
            'cpu': raw[3],
            'cputime': int(raw[4]),
            'graphics': get_graphics(vm_),
            'disks': get_disks(vm_),
        }
    return info


def node_info():
    '''
    Return a dict with information about this node

    CLI Example:
    salt '*' virt.node_info
    '''
    conn = __get_conn()
    # conn.getInfo() returns (model, memory, cpus, mhz, nodes,
    # sockets, cores, threads)
    raw = conn.getInfo()
    info = {
        'cpumodel': str(raw[0]),
        'phymemory': raw[1],
        'cpus': raw[2],
        'cpumhz': raw[3],
        'numanodes': raw[4],
        'sockets': raw[5],
        'cpucores': raw[6],
        'cputhreads': raw[7]
    }
    return info


def get_graphics(vm_):
    '''
    Returns the information on vnc for a given vm

    CLI Example:
    salt '*' virt.get_graphics <vm name>
    '''
    out = {'autoport': 'None',
           'keymap': 'None',
           'type': 'vnc',
           'port': 'None',
           'listen': 'None'}
    xml = get_xml(vm_)
    ssock = StringIO.StringIO(xml)
    doc = minidom.parse(ssock)
    for node in doc.getElementsByTagName("domain"):
        g_nodes = node.getElementsByTagName("graphics")
        for g_node in g_nodes:
            for key in g_node.attributes.keys():
                out[key] = g_node.getAttribute(key)
    return out


def get_disks(vm_):
    '''
    Return the disks of a named vm, as a dict of
    {<target dev>: {'file': <source file>, <qemu-img info fields>...}}

    CLI Example:
    salt '*' virt.get_disks <vm name>
    '''
    disks = {}
    doc = minidom.parse(StringIO.StringIO(get_xml(vm_)))
    for elem in doc.getElementsByTagName('disk'):
        sources = elem.getElementsByTagName('source')
        targets = elem.getElementsByTagName('target')
        if len(sources) > 0:
            source = sources[0]
        else:
            continue
        if len(targets) > 0:
            target = targets[0]
        else:
            continue
        if target.attributes.keys().count('dev')\
                and source.attributes.keys().count('file'):
            disks[target.getAttribute('dev')] =\
                    {'file': source.getAttribute('file')}
    for dev in disks:
        # qemu-img info prints YAML-ish "key: value" pairs; merge them in
        disks[dev].update(yaml.load(subprocess.Popen('qemu-img info '\
                + disks[dev]['file'],
            shell=True,
            stdout=subprocess.PIPE).communicate()[0]))
    return disks


def freemem():
    '''
    Return an int representing the amount of memory (in MB) that has not
    been given to virtual machines on this node

    CLI Example:
    salt '*' virt.freemem
    '''
    conn = __get_conn()
    mem = conn.getInfo()[1]
    # Take off just enough to sustain the hypervisor
    mem -= 256
    for vm_ in list_vms():
        dom = _get_dom(vm_)
        if dom.ID() > 0:
            # dom.info()[2] is in KiB; floor-divide so the result stays an int
            mem -= dom.info()[2] // 1024
    return mem


def freecpu():
    '''
    Return an int representing the number of unallocated cpus on this
    hypervisor

    CLI Example:
    salt '*' virt.freecpu
    '''
    conn = __get_conn()
    cpus = conn.getInfo()[2]
    for vm_ in list_vms():
        dom = _get_dom(vm_)
        if dom.ID() > 0:
            cpus -= dom.info()[3]
    return cpus


def full_info():
    '''
    Return the node_info, vm_info and freemem

    CLI Example:
    salt '*' virt.full_info
    '''
    return {'freemem': freemem(),
            'node_info': node_info(),
            'vm_info': vm_info(),
            'freecpu': freecpu()}


def get_xml(vm_):
    '''
    Returns the xml for a given vm

    CLI Example:
    salt '*' virt.get_xml <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.XMLDesc(0)


def shutdown(vm_):
    '''
    Send a soft shutdown signal to the named vm

    CLI Example:
    salt '*' virt.shutdown <vm name>
    '''
    dom = _get_dom(vm_)
    dom.shutdown()
    return True


def pause(vm_):
    '''
    Pause the named vm

    CLI Example:
    salt '*' virt.pause <vm name>
    '''
    dom = _get_dom(vm_)
    dom.suspend()
    return True


def resume(vm_):
    '''
    Resume the named vm

    CLI Example:
    salt '*' virt.resume <vm name>
    '''
    dom = _get_dom(vm_)
    dom.resume()
    return True


def create(vm_):
    '''
    Start a defined domain

    CLI Example:
    salt '*' virt.create <vm name>
    '''
    dom = _get_dom(vm_)
    dom.create()
    return True


def create_xml_str(xml):
    '''
    Start a domain based on the xml passed to the function

    CLI Example:
    salt '*' virt.create_xml_str <xml in string format>
    '''
    conn = __get_conn()
    conn.createXML(xml, 0)
    return True


def create_xml_path(path):
    '''
    Start a defined domain from an xml file on the node

    CLI Example:
    salt '*' virt.create_xml_path <path to xml file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return create_xml_str(open(path, 'r').read())


def migrate_non_shared(vm_, target):
    '''
    Attempt to execute non-shared storage "all" migration

    CLI Example:
    salt '*' virt.migrate_non_shared <vm name> <target hypervisor>
    '''
    cmd = 'virsh migrate --live --copy-storage-all ' + vm_\
        + ' qemu://' + target + '/system'

    return subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0]


def migrate_non_shared_inc(vm_, target):
    '''
    Attempt to execute non-shared storage incremental migration

    CLI Example:
    salt '*' virt.migrate_non_shared_inc <vm name> <target hypervisor>
    '''
    cmd = 'virsh migrate --live --copy-storage-inc ' + vm_\
        + ' qemu://' + target + '/system'

    return subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0]


def migrate(vm_, target):
    '''
    Shared storage migration

    CLI Example:
    salt '*' virt.migrate <vm name> <target hypervisor>
    '''
    cmd = 'virsh migrate --live ' + vm_\
        + ' qemu://' + target + '/system'

    # (removed an unreachable _get_dom call that followed this return)
    return subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE).communicate()[0]


def seed_non_shared_migrate(disks, force=False):
    '''
    Non shared migration requires that the disks be present on the migration
    destination, pass the disks information via this function, to the
    migration destination before executing the migration.

    CLI Example:
    salt '*' virt.seed_non_shared_migrate <disks>
    '''
    for dev, data in disks.items():
        fn_ = data['file']
        form = data['file format']
        size = data['virtual size'].split()[1][1:]
        if os.path.isfile(fn_) and not force:
            # the target exists, check to see if it is compatible
            pre = yaml.load(subprocess.Popen('qemu-img info arch',
                shell=True,
                stdout=subprocess.PIPE).communicate()[0])
            # NOTE(review): 'and' means both format AND size must differ to
            # abort — possibly intended as 'or'; confirm before changing.
            if not pre['file format'] == data['file format']\
                    and not pre['virtual size'] == data['virtual size']:
                return False
        if not os.path.isdir(os.path.dirname(fn_)):
            os.makedirs(os.path.dirname(fn_))
        if os.path.isfile(fn_):
            os.remove(fn_)
        cmd = 'qemu-img create -f ' + form + ' ' + fn_ + ' ' + size
        subprocess.call(cmd, shell=True)
        creds = _libvirt_creds()
        cmd = 'chown ' + creds['user'] + ':' + creds['group'] + ' ' + fn_
        subprocess.call(cmd, shell=True)
    return True


def destroy(vm_):
    '''
    Hard power down the virtual machine, this is equivalent to pulling the
    power

    CLI Example:
    salt '*' virt.destroy <vm name>
    '''
    try:
        dom = _get_dom(vm_)
        dom.destroy()
    except Exception:
        # _get_dom raises a plain Exception for missing vms; report failure
        # instead of propagating
        return False
    return True


def undefine(vm_):
    '''
    Remove a defined vm, this does not purge the virtual machine image, and
    this only works if the vm is powered down

    CLI Example:
    salt '*' virt.undefine <vm name>
    '''
    try:
        dom = _get_dom(vm_)
        dom.undefine()
    except Exception:
        return False
    return True


def purge(vm_, dirs=False):
    '''
    Recursively destroy and delete a virtual machine, pass True for dir's to
    also delete the directories containing the virtual machine disk images -
    USE WITH EXTREME CAUTION!

    CLI Example:
    salt '*' virt.purge <vm name>
    '''
    disks = get_disks(vm_)
    destroy(vm_)
    directories = set()
    for disk in disks:
        os.remove(disks[disk]['file'])
        directories.add(os.path.dirname(disks[disk]['file']))
    if dirs:
        for dir_ in directories:
            shutil.rmtree(dir_)
    return True


def virt_type():
    '''
    Returns the virtual machine type as a string

    CLI Example:
    salt '*' virt.virt_type
    '''
    return __grains__['virtual']


def is_kvm_hyper():
    '''
    Returns a bool whether or not this node is a KVM hypervisor

    CLI Example:
    salt '*' virt.is_kvm_hyper
    '''
    if __grains__['virtual'] != 'physical':
        return False
    # The kvm kernel modules must be loaded
    if not open('/proc/modules').read().count('kvm_'):
        return False
    # And the libvirtd daemon must be running
    libvirt_ret = subprocess.Popen('ps aux',
            shell=True,
            stdout=subprocess.PIPE).communicate()[0].count('libvirtd')
    if not libvirt_ret:
        return False
    return True
|
177
debian/salt/usr/share/pyshared/salt/modules/yum.py
vendored
177
debian/salt/usr/share/pyshared/salt/modules/yum.py
vendored
|
@ -1,177 +0,0 @@
|
|||
'''
|
||||
Support for YUM
|
||||
'''
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Confine this module to yum based systems
|
||||
'''
|
||||
# We don't need to support pre-yum OSes because they don't support python 2.6
|
||||
dists = 'CentOS Scientific RedHat Fedora'
|
||||
return 'pkg' if dists.count(__grains__['os']) else False
|
||||
|
||||
def _list_removed(old, new):
|
||||
'''
|
||||
List the packages which have been removed between the two package objects
|
||||
'''
|
||||
pkgs = []
|
||||
for pkg in old:
|
||||
if not new.has_key(pkg):
|
||||
pkgs.append(pkg)
|
||||
return pkgs
|
||||
|
||||
def available_version(name):
|
||||
'''
|
||||
The available version of the package in the repository
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.available_version <package name>
|
||||
'''
|
||||
out = __salt__['cmd.run_stdout']('yum list {0} -q'.format(name))
|
||||
for line in out.split('\n'):
|
||||
if not line.strip():
|
||||
continue
|
||||
# Itterate through the output
|
||||
comps = line.split()
|
||||
if comps[0].split('.')[0] == name:
|
||||
if len(comps) < 2:
|
||||
continue
|
||||
# found it!
|
||||
return comps[1][:comps[1].rindex('.')]
|
||||
# Package not available
|
||||
return ''
|
||||
|
||||
def version(name):
|
||||
'''
|
||||
Returns a version if the package is installed, else returns an empty string
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.version <package name>
|
||||
'''
|
||||
pkgs = list_pkgs()
|
||||
if pkgs.has_key(name):
|
||||
return pkgs[name]
|
||||
else:
|
||||
return ''
|
||||
|
||||
def list_pkgs():
|
||||
'''
|
||||
List the packages currently installed in a dict:
|
||||
{'<package_name>': '<version>'}
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.list_pkgs
|
||||
'''
|
||||
cmd = "rpm -qa --qf '%{NAME}:%{VERSION}-%{RELEASE};'"
|
||||
ret = {}
|
||||
out = __salt__['cmd.run_stdout'](cmd)
|
||||
for line in out.split(';'):
|
||||
if not line.count(':'):
|
||||
continue
|
||||
comps = line.split(':')
|
||||
ret[comps[0]] = comps[1]
|
||||
return ret
|
||||
|
||||
def refresh_db():
|
||||
'''
|
||||
Since yum refreshes the database automatically, this runs a yum clean,
|
||||
so that the next yum operation will have a clean database
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.refresh_db
|
||||
'''
|
||||
cmd = 'yum clean dbcache'
|
||||
__salt__['cmd.run'](cmd)
|
||||
return True
|
||||
|
||||
def install(pkg, refresh=False):
|
||||
'''
|
||||
Install the passed package, add refresh=True to clean out the yum database
|
||||
before executing
|
||||
|
||||
Return a dict containing the new package names and versions:
|
||||
{'<package>': {'old': '<old-version>',
|
||||
'new': '<new-version>']}
|
||||
|
||||
CLI Example:
|
||||
salt '*' pkg.install <package name>
|
||||
'''
|
||||
old = list_pkgs()
|
||||
cmd = 'yum -y install ' + pkg
|
||||
if refresh:
|
||||
refresh_db()
|
||||
__salt__['cmd.run'](cmd)
|
||||
new = list_pkgs()
|
||||
pkgs = {}
|
||||
for npkg in new:
|
||||
if old.has_key(npkg):
|
||||
if old[npkg] == new[npkg]:
|
||||
# no change in the package
|
||||
continue
|
||||
else:
|
||||
# the package was here before and the version has changed
|
||||
pkgs[npkg] = {'old': old[npkg],
|
||||
'new': new[npkg]}
|
||||
else:
|
||||
# the package is freshly installed
|
||||
pkgs[npkg] = {'old': '',
|
||||
'new': new[npkg]}
|
||||
return pkgs
|
||||
|
||||
def upgrade():
    '''
    Run a full system upgrade, a yum upgrade

    Return a dict containing the new package names and versions:
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Example:
    salt '*' pkg.upgrade
    '''
    old = list_pkgs()
    cmd = 'yum -y upgrade'
    __salt__['cmd.run'](cmd)
    new = list_pkgs()
    pkgs = {}
    for npkg, nver in new.items():
        # '' marks a package that was not installed before the upgrade
        over = old.get(npkg, '')
        if over == nver:
            # no change in the package
            continue
        # Either pulled in as a new dependency (over == '') or upgraded
        pkgs[npkg] = {'old': over, 'new': nver}
    return pkgs
|
||||
|
||||
def remove(pkg):
    '''
    Remove a single package with yum remove

    Return a list containing the removed packages:

    CLI Example:
    salt '*' pkg.remove <package name>
    '''
    # Snapshot before and after so the helper can diff what disappeared
    before = list_pkgs()
    __salt__['cmd.run']('yum -y remove ' + pkg)
    after = list_pkgs()
    return _list_removed(before, after)
|
||||
|
||||
def purge(pkg):
    '''
    Yum does not have a purge, this function calls remove

    Return a list containing the removed packages:

    CLI Example:
    salt '*' pkg.purge <package name>
    '''
    # yum has no notion of purging configuration, so purge == remove
    return remove(pkg)
|
179
debian/salt/usr/share/pyshared/salt/output.py
vendored
179
debian/salt/usr/share/pyshared/salt/output.py
vendored
|
@ -1,179 +0,0 @@
|
|||
'''
|
||||
A simple way of setting the output format for data from modules
|
||||
'''
|
||||
# Import Python libs
|
||||
import yaml
|
||||
import pprint
|
||||
|
||||
# Conditionally import the json module
|
||||
try:
|
||||
import json
|
||||
JSON = True
|
||||
except ImportError:
|
||||
JSON = False
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils
|
||||
|
||||
__all__ = ('get_outputter',)
|
||||
|
||||
def remove_colors():
    '''
    Access all of the utility colors and change them to empty strings.

    NOTE(review): this is currently a stub -- the body consists only of
    this docstring and the function performs no work.
    '''
|
||||
|
||||
class Outputter(object):
    '''
    Class for outputting data to the screen.

    The base class simply pretty-prints whatever it is handed; subclasses
    set ``supports`` to the format name they advertise to get_outputter().
    '''
    supports = None

    @classmethod
    def check(cls, name):
        '''
        Return True if this outputter handles the named output format.
        '''
        # Don't advertise Outputter classes for optional modules
        if hasattr(cls, "enabled") and not cls.enabled:
            return False
        return cls.supports == name

    def __call__(self, data, **kwargs):
        pprint.pprint(data)
|
||||
|
||||
class HighStateOutputter(Outputter):
    '''
    Not a command line option, the HighStateOutputter is only meant to be used
    with the state.highstate function, or a function that returns highstate
    return data
    '''
    supports = 'highstate'
    def __call__(self, data, **kwargs):
        # data maps hostname -> either a list of compile errors or a dict
        # of {state tag: state return dict} -- assumed from the two
        # isinstance branches below; TODO confirm against state.highstate
        colors = salt.utils.get_colors(kwargs.get('color'))
        for host in data:
            # Host header starts green and is demoted to red on any failure
            hcolor = colors['GREEN']
            hstrs = []
            if isinstance(data[host], list):
                # Errors have been detected, list them in RED!
                hcolor = colors['RED_BOLD']
                hstrs.append(' {0}Data failed to compile:{1[ENDC]}'.format(hcolor, colors))
                for err in data[host]:
                    hstrs.append('{0}----------\n {1}{2[ENDC]}'.format(hcolor, err, colors))
            if isinstance(data[host], dict):
                # Everything rendered as it should display the output
                for tname, ret in data[host].items():
                    tcolor = colors['GREEN']
                    if not ret['result']:
                        hcolor = colors['RED']
                        tcolor = colors['RED']
                    # Tag format is '<state>.<name>.<fun>'
                    comps = tname.split('.')
                    hstrs.append('{0}----------\n State: - {1}{2[ENDC]}'.format(
                            tcolor,
                            comps[0],
                            colors
                            ))
                    hstrs.append(' {0}Name: {1}{2[ENDC]}'.format(
                            tcolor,
                            comps[1],
                            colors
                            ))
                    hstrs.append(' {0}Function: {1}{2[ENDC]}'.format(
                            tcolor,
                            comps[2],
                            colors
                            ))
                    hstrs.append(' {0}Result: {1}{2[ENDC]}'.format(
                            tcolor,
                            str(ret['result']),
                            colors
                            ))
                    hstrs.append(' {0}Comment: {1}{2[ENDC]}'.format(
                            tcolor,
                            ret['comment'],
                            colors
                            ))
                    changes = ' Changes: '
                    for key in ret['changes']:
                        # NOTE(review): the str branch and both pformat
                        # branches below could collapse -- the elif and
                        # else bodies are identical.
                        if isinstance(ret['changes'][key], str):
                            changes += key + ': ' + ret['changes'][key] + '\n '
                        elif isinstance(ret['changes'][key], dict):
                            changes += key + ': ' + pprint.pformat(ret['changes'][key]) + '\n '
                        else:
                            changes += key + ': ' + pprint.pformat(ret['changes'][key]) + '\n '
                    hstrs.append('{0}{1}{2[ENDC]}'.format(
                            tcolor,
                            changes,
                            colors
                            ))
            print '{0}{1}:{2[ENDC]}'.format(
                    hcolor,
                    host,
                    colors)
            for hstr in hstrs:
                print hstr
|
||||
|
||||
|
||||
class RawOutputter(Outputter):
    '''
    Raw output. This calls repr() on the returned data.

    NOTE(review): despite the docstring, ``print data`` emits str(data),
    not repr(data) -- confirm which is intended.
    '''
    supports = "raw"
    def __call__(self, data, **kwargs):
        print data
|
||||
|
||||
class TxtOutputter(Outputter):
    '''
    Plain text output. Primarily for returning output from
    shell commands in the exact same way they would output
    on the shell when ran directly.
    '''
    supports = "txt"
    def __call__(self, data, **kwargs):
        # Dict-like data is printed as one "key: line" pair per output line
        if hasattr(data, "keys"):
            for key in data.keys():
                value = data[key]
                # Repeat the key for every line of multi-line command output
                for line in value.split('\n'):
                    print "{0}: {1}".format(key, line)
        else:
            # For non-dictionary data, just use print
            RawOutputter()(data)
|
||||
|
||||
class JSONOutputter(Outputter):
    '''
    JSON output.

    Only advertised when the json module imported successfully (see the
    module-level JSON flag).
    '''
    supports = "json"
    enabled = JSON

    def __call__(self, data, **kwargs):
        '''
        Print data as JSON; extra kwargs (e.g. indent=4) are passed
        straight through to json.dumps.
        '''
        try:
            # 'color' is meaningful to other outputters only; drop it
            # before handing the kwargs to json.dumps.
            kwargs.pop('color', None)
            ret = json.dumps(data, **kwargs)
        except TypeError:
            # Return valid json for unserializable objects
            ret = json.dumps({})
        print(ret)
|
||||
|
||||
class YamlOutputter(Outputter):
    '''
    Yaml output. All of the cool kids are doing it.
    '''
    supports = "yaml"

    def __call__(self, data, **kwargs):
        '''
        Print data as a YAML document; extra kwargs are passed straight
        through to yaml.dump.
        '''
        # 'color' is meaningful to other outputters only; drop it before
        # handing the kwargs to yaml.dump.
        kwargs.pop('color', None)
        print(yaml.dump(data, **kwargs))
|
||||
|
||||
def get_outputter(name=None):
    '''
    Factory function for returning the right output class.

    Usage:
    printout = get_outputter("txt")
    printout(ret)
    '''
    # Return an instance of the first subclass that advertises this name,
    # falling back to the pprint-based base Outputter.
    for candidate in Outputter.__subclasses__():
        if candidate.check(name):
            return candidate()
    return Outputter()
|
31
debian/salt/usr/share/pyshared/salt/payload.py
vendored
31
debian/salt/usr/share/pyshared/salt/payload.py
vendored
|
@ -1,31 +0,0 @@
|
|||
'''
|
||||
Many aspects of the salt payload need to be managed, from the return of
|
||||
encrypted keys to general payload dynamics and packaging, these happen in here
|
||||
'''
|
||||
import cPickle as pickle
|
||||
|
||||
def package(payload, protocol=2):
    '''
    Serialize a payload for the wire.

    This method for now just wraps pickle.dumps, but it is here so that we
    can make the serialization a custom option in the future with ease.
    '''
    serialized = pickle.dumps(payload, protocol)
    return serialized
|
||||
|
||||
def unpackage(package_):
    '''
    Unpackage a payload produced by package().

    NOTE: this is pickle.loads, which must only ever be fed trusted data;
    unpickling attacker-controlled bytes can execute arbitrary code.
    '''
    payload = pickle.loads(package_)
    return payload
|
||||
|
||||
def format_payload(enc, **kwargs):
    '''
    Pass in the required arguments for a payload, the enc type and the cmd,
    then a list of keyword args to generate the body of the load dict.

    Arguments:
        enc -- the encryption tag placed under the 'enc' key
        kwargs -- keyword arguments forming the body of the 'load' dict
    '''
    # dict(kwargs) replaces the original manual key-by-key copy loop
    payload = {'enc': enc, 'load': dict(kwargs)}
    return package(payload)
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
'''
|
||||
The json_jinja renderer, this renderer will take a json file
with the jinja template and render it to a high data format for salt states.
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import json
|
||||
|
||||
# Import Third Party libs
|
||||
from jinja2 import Template
|
||||
|
||||
def render(template):
    '''
    Render the data passing the functions and grains into the rendering system

    Returns the high data structure parsed from the template's JSON output,
    or an empty dict if the template file does not exist.
    '''
    if not os.path.isfile(template):
        return {}
    # Expose the execution modules and grains to the template
    passthrough = {}
    passthrough['salt'] = __salt__
    passthrough['grains'] = __grains__
    # Read inside a context manager so the file handle is always closed
    # (the original left it to the garbage collector)
    with open(template, 'r') as fp_:
        template = Template(fp_.read())
    json_data = template.render(**passthrough)
    return json.loads(json_data)
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
'''
|
||||
The json_mako renderer, this renderer will take a json file
with the mako template and render it to a high data format for salt states.
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import json
|
||||
|
||||
# Import Third Party libs
|
||||
from mako.template import Template
|
||||
|
||||
def render(template):
    '''
    Render the data passing the functions and grains into the rendering system

    Returns the high data structure parsed from the template's JSON output,
    or an empty dict if the template file does not exist.
    '''
    if not os.path.isfile(template):
        return {}
    # Expose the execution modules and grains to the template
    passthrough = {}
    passthrough['salt'] = __salt__
    passthrough['grains'] = __grains__
    # Read inside a context manager so the file handle is always closed
    # (the original left it to the garbage collector)
    with open(template, 'r') as fp_:
        template = Template(fp_.read())
    json_data = template.render(**passthrough)
    return json.loads(json_data)
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
'''
|
||||
The default rendering engine, yaml_jinja, this renderer will take a yaml file
|
||||
with the jinja template and render it to a high data format for salt states.
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
|
||||
# Import Third Party libs
|
||||
import yaml
|
||||
from jinja2 import Template
|
||||
|
||||
def render(template):
    '''
    Render the data passing the functions and grains into the rendering system

    Returns the high data structure parsed from the template's YAML output,
    or an empty dict if the template file does not exist.
    '''
    if not os.path.isfile(template):
        return {}
    # Expose the execution modules and grains to the template
    passthrough = {}
    passthrough['salt'] = __salt__
    passthrough['grains'] = __grains__
    # Read inside a context manager so the file handle is always closed
    # (the original left it to the garbage collector)
    with open(template, 'r') as fp_:
        template = Template(fp_.read())
    yaml_data = template.render(**passthrough)
    # NOTE(review): yaml.load can instantiate arbitrary Python objects;
    # state files are master-controlled, but yaml.safe_load would be safer.
    return yaml.load(yaml_data)
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
'''
|
||||
yaml_mako, this renderer will take a yaml file
|
||||
within a mako template and render it to a high data format for salt states.
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
|
||||
# Import Third Party libs
|
||||
import yaml
|
||||
from mako.template import Template
|
||||
|
||||
def render(template):
    '''
    Render the data passing the functions and grains into the rendering system

    Returns the high data structure parsed from the template's YAML output,
    or an empty dict if the template file does not exist.
    '''
    if not os.path.isfile(template):
        return {}
    # Expose the execution modules and grains to the template
    passthrough = {}
    passthrough['salt'] = __salt__
    passthrough['grains'] = __grains__
    # Read inside a context manager so the file handle is always closed
    # (the original left it to the garbage collector)
    with open(template, 'r') as fp_:
        template = Template(fp_.read())
    yaml_data = template.render(**passthrough)
    # NOTE(review): yaml.load can instantiate arbitrary Python objects;
    # state files are master-controlled, but yaml.safe_load would be safer.
    return yaml.load(yaml_data)
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
'''
|
||||
The local returner is used to test the returner interface, it just prints the
|
||||
return data to the console to verify that it is being passed properly
|
||||
'''
|
||||
|
||||
def returner(ret):
    '''
    Print the return data to the terminal to verify functionality
    '''
    # print(...) with a single argument behaves identically on Python 2
    # and 3; the original 'print ret' statement is Python-2 only.
    print(ret)
|
|
@ -1,43 +0,0 @@
|
|||
'''
|
||||
Return data to a mongodb server
|
||||
|
||||
This is the default interface for returning data for the butter statd subsytem
|
||||
'''
|
||||
|
||||
import logging
|
||||
import pymongo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__opts__ = {
|
||||
'mongo.host': 'salt',
|
||||
'mongo.port': 27017,
|
||||
'mongo.db': 'salt',
|
||||
'mongo.user': '',
|
||||
'mongo.password': '',
|
||||
}
|
||||
|
||||
def returner(ret):
    '''
    Return data to a mongodb server

    Connects using the mongo.* values from __opts__, authenticating only
    when both a user and a password are configured, and stores the return
    under a collection named after the minion id, keyed by job id.
    '''
    conn = pymongo.Connection(
            __opts__['mongo.host'],
            __opts__['mongo.port'],
            )
    db = conn[__opts__['mongo.db']]

    user = __opts__.get('mongo.user')
    password = __opts__.get('mongo.password')
    if user and password:
        db.authenticate(user, password)

    col = db[ret['id']]
    back = {}
    # isinstance replaces the non-idiomatic type(x) == type(dict()) check
    if isinstance(ret['return'], dict):
        # Mongo key names may not contain '.', so rewrite them with '-'
        for key in ret['return']:
            back[key.replace('.', '-')] = ret['return'][key]
    else:
        back = ret['return']
    log.debug(back)
    col.insert({ret['jid']: back})
|
|
@ -1,27 +0,0 @@
|
|||
'''
|
||||
Return data to a redis server
|
||||
This is a VERY simple example for pushing data to a redis server and is not
|
||||
necessarily intended as a usable interface.
|
||||
'''
|
||||
|
||||
import redis
|
||||
import json
|
||||
|
||||
__opts__ = {
|
||||
'redis.host': 'mcp',
|
||||
'redis.port': 6379,
|
||||
'redis.db': '0',
|
||||
}
|
||||
|
||||
def returner(ret):
    '''
    Return data to a redis data store
    '''
    conn = redis.Redis(
            host=__opts__['redis.host'],
            port=__opts__['redis.port'],
            db=__opts__['redis.db'])
    # Track which jobs this minion has returned for
    conn.sadd("%(id)s:jobs" % ret, ret['jid'])
    # Store the actual return payload as JSON
    conn.set("%(jid)s:%(id)s" % ret, json.dumps(ret['return']))
    # Global indexes: the set of all jids, and the minions per jid
    conn.sadd('jobs', ret['jid'])
    conn.sadd(ret['jid'], ret['id'])
|
50
debian/salt/usr/share/pyshared/salt/runner.py
vendored
50
debian/salt/usr/share/pyshared/salt/runner.py
vendored
|
@ -1,50 +0,0 @@
|
|||
'''
|
||||
Execute salt convenience routines
|
||||
'''
|
||||
|
||||
# Import python modules
|
||||
import sys
|
||||
|
||||
# Import salt modules
|
||||
import salt.loader
|
||||
|
||||
class Runner(object):
    '''
    Execute the salt runner interface
    '''
    def __init__(self, opts):
        self.opts = opts
        # Load the runner modules configured for this master
        self.functions = salt.loader.runner(opts)

    def _verify_fun(self):
        '''
        Verify that a runner function was named and exists; exit the
        process with an error on stderr otherwise.
        '''
        if not self.opts['fun']:
            err = 'Must pass a runner function'
            sys.stderr.write('%s\n' % err)
            sys.exit(1)
        # 'in' replaces dict.has_key, which was removed in Python 3
        if self.opts['fun'] not in self.functions:
            err = 'Passed function is unavailable'
            sys.stderr.write('%s\n' % err)
            sys.exit(1)

    def _print_docs(self):
        '''
        Print the docstring of every runner function whose name starts
        with the passed function prefix.
        '''
        # Single-argument print(...) behaves identically on Python 2 and 3;
        # the original used Python-2-only print statements.
        for fun in sorted(self.functions):
            if fun.startswith(self.opts['fun']):
                print(fun + ':')
                print(self.functions[fun].__doc__)
                print('')

    def run(self):
        '''
        Execute the runner sequence
        '''
        if self.opts['doc']:
            self._print_docs()
        else:
            self._verify_fun()
            self.functions[self.opts['fun']](*self.opts['arg'])
|
|
@ -1,30 +0,0 @@
|
|||
'''
|
||||
General management functions for salt, tools like seeing what hosts are up
|
||||
and what hosts are down
|
||||
'''
|
||||
|
||||
# Import salt modules
|
||||
import salt.client
|
||||
import salt.cli.key
|
||||
|
||||
def down():
    '''
    Print a list of all the down or unresponsive salt minions
    '''
    client = salt.client.LocalClient(__opts__['config'])
    key = salt.cli.key.Key(__opts__)
    minions = client.cmd('*', 'test.ping', timeout=1)
    keys = key._keys('acc')
    # Any accepted key whose minion did not answer the ping is down
    for minion in minions:
        keys.remove(minion)
    for minion in sorted(keys):
        # print(...) form is valid on both Python 2 and 3
        print(minion)
|
||||
|
||||
def up():
    '''
    Print a list of all of the minions that are up
    '''
    client = salt.client.LocalClient(__opts__['config'])
    # Every minion that answers the ping is considered up
    minions = client.cmd('*', 'test.ping', timeout=1)
    for minion in sorted(minions):
        # print(...) form is valid on both Python 2 and 3
        print(minion)
|
530
debian/salt/usr/share/pyshared/salt/state.py
vendored
530
debian/salt/usr/share/pyshared/salt/state.py
vendored
|
@ -1,530 +0,0 @@
|
|||
'''
|
||||
The module used to execute states in salt. A state is unlike a module execution
|
||||
in that instead of just executing a command it ensure that a certain state is
|
||||
present on the system.
|
||||
|
||||
The data sent to the state calls is as follows:
|
||||
{ 'state': '<state module name>',
|
||||
'fun': '<state function name>',
|
||||
'name': '<the name argument passed to all states>'
|
||||
'argn': '<arbitrairy argument, can have many of these>'
|
||||
}
|
||||
'''
|
||||
# Import python modules
|
||||
import os
|
||||
import copy
|
||||
import inspect
|
||||
import tempfile
|
||||
import logging
|
||||
# Import Salt modules
|
||||
import salt.loader
|
||||
import salt.minion
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def format_log(ret):
    '''
    Format the state into a log message and send it to the logger.

    Successful state returns are logged at INFO, failed ones at ERROR;
    anything that does not look like a state return dict is logged
    verbatim at INFO.
    '''
    msg = ''
    if isinstance(ret, dict):
        # Looks like the ret may be a valid state return
        if 'changes' in ret:
            # Yep, looks like a valid state return
            chg = ret['changes']
            if not chg:
                msg = 'No changes made for {0[name]}'.format(ret)
            elif isinstance(chg, dict):
                # A string 'diff' entry means a file-management change
                if isinstance(chg.get('diff'), str):
                    msg = 'File changed:\n{0}'.format(chg['diff'])
                # Peek at one value to detect package-install return data.
                # next(iter(...)) replaces chg.keys()[0], which breaks on
                # Python 3 where keys() is a view.
                first = chg[next(iter(chg))]
                if isinstance(first, dict) and 'new' in first:
                    # This is the return data from a package install
                    msg = 'Installed Packages:\n'
                    for pkg in chg:
                        old = chg[pkg]['old'] or 'absent'
                        msg += '{0} changed from {1} to {2}\n'.format(
                                pkg, old, chg[pkg]['new'])
            if not msg:
                msg = str(ret['changes'])
            if ret['result']:
                log.info(msg)
            else:
                log.error(msg)
    else:
        # catch unhandled data
        log.info(str(ret))
|
||||
|
||||
class StateError(Exception):
    '''
    Raised when a state execution cannot proceed.
    '''
    pass
|
||||
|
||||
class State(object):
    '''
    Class used to execute salt states
    '''
    def __init__(self, opts):
        # Grains may already be present when opts came from a running minion
        if not opts.has_key('grains'):
            opts['grains'] = salt.loader.grains(opts)
        self.opts = opts
        # Load execution modules first; state modules and renderers need them
        self.functions = salt.loader.minion_mods(self.opts)
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions)

    def verify_data(self, data):
        '''
        Verify the data, return an error statement if something is wrong
        '''
        errors = []
        if not data.has_key('state'):
            errors.append('Missing "state" data')
        if not data.has_key('fun'):
            errors.append('Missing "fun" data')
        if not data.has_key('name'):
            errors.append('Missing "name" data')
        if errors:
            return errors
        full = data['state'] + '.' + data['fun']
        if not self.states.has_key(full):
            if data.has_key('__sls__'):
                errors.append(
                    'State {0} found in sls {1} is unavailable'.format(
                        full,
                        data['__sls__']
                        )
                    )
            else:
                errors.append('Specified state ' + full + ' is unavailable.')
        else:
            # Ensure every mandatory (non-defaulted) argument of the state
            # function is present in the low data.
            aspec = inspect.getargspec(self.states[full])
            arglen = 0
            deflen = 0
            if type(aspec[0]) == type(list()):
                arglen = len(aspec[0])
            if type(aspec[3]) == type(tuple()):
                deflen = len(aspec[3])
            # Arguments without defaults are the first (arglen - deflen)
            for ind in range(arglen - deflen):
                if not data.has_key(aspec[0][ind]):
                    errors.append('Missing paramater ' + aspec[0][ind]\
                                + ' for state ' + full)
        return errors

    def verify_high(self, high):
        '''
        Verify that the high data is viable and follows the data structure
        '''
        errors = []
        if not isinstance(high, dict):
            errors.append('High data is not a dictonary and is invalid')
        for name, body in high.items():
            if not isinstance(body, dict):
                err = 'The type {0} is not formated as a dictonary'.format(name)
                errors.append(err)
                continue
            # NOTE(review): per-state verification is not implemented yet;
            # this loop intentionally does nothing.
            for state, run in body.items():
                pass
        return errors

    def verify_chunks(self, chunks):
        '''
        Verify the chunks in a list of low data structures
        '''
        err = []
        for chunk in chunks:
            err += self.verify_data(chunk)
        return err

    def format_call(self, data):
        '''
        Formats low data into a list of dict's used to actually call the state,
        returns:
        {
        'full': 'module.function',
        'args': [arg[0], arg[1], ...]
        }
        used to call the function like this:
        self.states[ret['full']](*ret['args'])

        It is assumed that the passed data has already been verified with
        verify_data
        '''
        ret = {}
        ret['full'] = data['state'] + '.' + data['fun']
        ret['args'] = []
        aspec = inspect.getargspec(self.states[ret['full']])
        arglen = 0
        deflen = 0
        if type(aspec[0]) == type(list()):
            arglen = len(aspec[0])
        if type(aspec[3]) == type(tuple()):
            deflen = len(aspec[3])
        # Collect the defaulted arguments with their default values by
        # walking the signature from the end, where defaults live
        kwargs = {}
        for ind in range(arglen - 1, 0, -1):
            minus = arglen - ind
            if deflen - minus > -1:
                kwargs[aspec[0][ind]] = aspec[3][-minus]
        # Override any defaults that were supplied in the low data
        for arg in kwargs:
            if data.has_key(arg):
                kwargs[arg] = data[arg]
        # Build the positional argument list in signature order; mandatory
        # arguments are pulled straight from the low data
        for arg in aspec[0]:
            if kwargs.has_key(arg):
                ret['args'].append(kwargs[arg])
            else:
                ret['args'].append(data[arg])
        return ret

    def compile_high_data(self, high):
        '''
        "Compile" the high data as it is retrieved from the cli or yaml into
        the individual state executor structures
        '''
        chunks = []
        for name, body in high.items():
            for state, run in body.items():
                # Keys like '__sls__' are metadata, not states
                if state.startswith('__'):
                    continue
                chunk = {'state': state,
                         'name': name}
                if body.has_key('__sls__'):
                    chunk['__sls__'] = body['__sls__']
                funcs = set()
                names = set()
                for arg in run:
                    # A bare string names the state function to run
                    if type(arg) == type(str()):
                        funcs.add(arg)
                        continue
                    # A dict is either the 'names' expansion list or a
                    # keyword argument for the state call
                    if type(arg) == type(dict()):
                        for key, val in arg.items():
                            if key == 'names':
                                names.update(val)
                                continue
                            else:
                                chunk.update(arg)
                if names:
                    # Expand one declaration into a chunk per listed name
                    for name in names:
                        live = copy.deepcopy(chunk)
                        live['name'] = name
                        for fun in funcs:
                            live['fun'] = fun
                            chunks.append(live)
                else:
                    live = copy.deepcopy(chunk)
                    for fun in funcs:
                        live['fun'] = fun
                        chunks.append(live)

        # Deterministic execution order: sort by state, then name, then fun
        return sorted(chunks, key=lambda k: k['state'] + k['name'] + k['fun'])

    def compile_template(self, template):
        '''
        Take the path to a template and return the high data structure derived
        from the template.
        '''
        if not isinstance(template, str):
            return {}
        if not os.path.isfile(template):
            return {}
        # Delegate to the renderer configured in the opts (e.g. yaml_jinja)
        return self.rend[self.opts['renderer']](template)

    def compile_template_str(self, template):
        '''
        Take the path to a template and return the high data structure derived
        from the template.
        '''
        # Renderers work on files, so spool the string to a temp file first.
        # NOTE(review): the OS-level fd returned by mkstemp()[0] is never
        # closed, and the written file object is left to the GC.
        fn_ = tempfile.mkstemp()[1]
        open(fn_, 'w+').write(template)
        high = self.rend[self.opts['renderer']](fn_)
        os.remove(fn_)
        return high

    def call(self, data):
        '''
        Call a state directly with the low data structure, verify data before
        processing.
        '''
        log.info(
                'Executing state {0[state]}.{0[fun]} for {0[name]}'.format(data)
                )
        cdata = self.format_call(data)
        ret = self.states[cdata['full']](*cdata['args'])
        format_log(ret)
        return ret

    def call_chunks(self, chunks):
        '''
        Iterate over a list of chunks and call them, checking for requires.
        '''
        running = {}
        for low in chunks:
            running = self.call_chunk(low, running, chunks)
        return running

    def check_requires(self, low, running, chunks):
        '''
        Look into the running data to see if the requirement has been met

        Returns 'met', 'unmet' or 'fail'.
        '''
        if not low.has_key('require'):
            return 'met'
        reqs = []
        status = 'unmet'
        # Resolve each {state: name} require entry to its chunk
        for req in low['require']:
            for chunk in chunks:
                if chunk['name'] == req[req.keys()[0]]:
                    if chunk['state'] == req.keys()[0]:
                        reqs.append(chunk)
        fun_stats = []
        for req in reqs:
            # Tags in the running dict are '<state>.<name>.<fun>'
            tag = req['state'] + '.' + req['name'] + '.' + req['fun']
            if not running.has_key(tag):
                fun_stats.append('unmet')
            else:
                fun_stats.append('met' if running[tag]['result'] else 'fail')
        # Any unmet or failed requisite vetoes the whole set
        for stat in fun_stats:
            if stat == 'unmet':
                return stat
            elif stat == 'fail':
                return stat
        return 'met'

    def check_watchers(self, low, running, chunks):
        '''
        Look into the running data to see if the watched states have been run

        Returns 'change', 'unmet' or 'nochange'.
        '''
        if not low.has_key('watch'):
            return 'nochange'
        reqs = []
        status = 'unmet'
        # Resolve each {state: name} watch entry to its chunk
        for req in low['watch']:
            for chunk in chunks:
                if chunk['name'] == req[req.keys()[0]]:
                    if chunk['state'] == req.keys()[0]:
                        reqs.append(chunk)
        fun_stats = []
        for req in reqs:
            tag = req['state'] + '.' + req['name'] + '.' + req['fun']
            if not running.has_key(tag):
                fun_stats.append('unmet')
            else:
                fun_stats.append('change' if running[tag]['changes'] else 'nochange')
        # A change anywhere wins; otherwise an unrun watcher forces 'unmet'
        for stat in fun_stats:
            if stat == 'change':
                return stat
            elif stat == 'unmet':
                return stat
        return 'nochange'

    def call_chunk(self, low, running, chunks):
        '''
        Check if a chunk has any requires, execute the requires and then the
        chunk

        Recurses into required/watched chunks first, then re-enters itself
        for the current chunk once they have been run.
        '''
        tag = low['state'] + '.' + low['name'] + '.' + low['fun']
        if low.has_key('require'):
            status = self.check_requires(low, running, chunks)
            if status == 'unmet':
                reqs = []
                for req in low['require']:
                    for chunk in chunks:
                        if chunk['name'] == req[req.keys()[0]]:
                            if chunk['state'] == req.keys()[0]:
                                reqs.append(chunk)
                # Run the requisites first, then retry this chunk
                for chunk in reqs:
                    running = self.call_chunk(chunk, running, chunks)
                running = self.call_chunk(low, running, chunks)
            elif status == 'met':
                running[tag] = self.call(low)
            elif status == 'fail':
                running[tag] = {'changes': {},
                                'result': False,
                                'comment': 'One or more require failed'}
        elif low.has_key('watch'):
            status = self.check_watchers(low, running, chunks)
            if status == 'unmet':
                reqs = []
                for req in low['watch']:
                    for chunk in chunks:
                        if chunk['name'] == req[req.keys()[0]]:
                            if chunk['state'] == req.keys()[0]:
                                reqs.append(chunk)
                # Run the watched chunks first, then retry this chunk
                for chunk in reqs:
                    running = self.call_chunk(chunk, running, chunks)
                running = self.call_chunk(low, running, chunks)
            elif status == 'nochange':
                running[tag] = self.call(low)
            elif status == 'change':
                # A watched state changed: run normally, and if that made
                # no changes of its own, invoke the state's 'watcher' fun
                ret = self.call(low)
                if not ret['changes']:
                    low['fun'] = 'watcher'
                    ret = self.call(low)
                running[tag] = ret
        else:
            running[tag] = self.call(low)
        return running

    def call_high(self, high):
        '''
        Process a high data call and ensure the defined states.
        '''
        err = []
        rets = []
        # Verify the structure before compiling, then the chunks after
        errors = self.verify_high(high)
        if errors:
            return errors
        chunks = self.compile_high_data(high)
        errors += self.verify_chunks(chunks)
        if errors:
            return errors
        return self.call_chunks(chunks)

    def call_template(self, template):
        '''
        Enforce the states in a template
        '''
        high = self.compile_template(template)
        if high:
            return self.call_high(high)
        return high

    def call_template_str(self, template):
        '''
        Enforce the states in a template, pass the template as a string
        '''
        high = self.compile_template_str(template)
        if high:
            return self.call_high(high)
        return high
|
||||
|
||||
class HighState(object):
|
||||
'''
|
||||
Generate and execute the salt "High State". The High State is the compound
|
||||
state derived from a group of template files stored on the salt master or
|
||||
in a the local cache.
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
self.client = salt.minion.FileClient(opts)
|
||||
self.opts = self.__gen_opts(opts)
|
||||
self.state = State(self.opts)
|
||||
self.matcher = salt.minion.Matcher(self.opts)
|
||||
|
||||
def __gen_opts(self, opts):
|
||||
'''
|
||||
The options used by the High State object are derived from options on
|
||||
the minion and the master, or just the minion if the high state call is
|
||||
entirely local.
|
||||
'''
|
||||
# If the state is intended to be applied locally, then the local opts
|
||||
# should have all of the needed data, otherwise overwrite the local
|
||||
# data items with data from the master
|
||||
if opts.has_key('local_state'):
|
||||
if opts['local_state']:
|
||||
return opts
|
||||
mopts = self.client.master_opts()
|
||||
opts['renderer'] = mopts['renderer']
|
||||
if mopts['state_top'].startswith('salt://'):
|
||||
opts['state_top'] = mopts['state_top']
|
||||
elif mopts['state_top'].startswith('/'):
|
||||
opts['state_top'] = os.path.join('salt://', mopts['state_top'][1:])
|
||||
else:
|
||||
opts['state_top'] = os.path.join('salt://', mopts['state_top'])
|
||||
return opts
|
||||
|
||||
def get_top(self):
|
||||
'''
|
||||
Returns the high data derived from the top file
|
||||
'''
|
||||
top = self.client.cache_file(self.opts['state_top'], 'base')
|
||||
return self.state.compile_template(top)
|
||||
|
||||
def top_matches(self, top):
|
||||
'''
|
||||
Search through the top high data for matches and return the states that
|
||||
this minion needs to execute.
|
||||
|
||||
Returns:
|
||||
{'env': ['state1', 'state2', ...]}
|
||||
'''
|
||||
matches = {}
|
||||
for env, body in top.items():
|
||||
for match, data in body.items():
|
||||
if self.matcher.confirm_top(match, data):
|
||||
if not matches.has_key(env):
|
||||
matches[env] = []
|
||||
for item in data:
|
||||
if type(item) == type(str()):
|
||||
matches[env].append(item)
|
||||
return matches
|
||||
|
||||
def gather_states(self, matches):
|
||||
'''
|
||||
Gather the template files from the master
|
||||
'''
|
||||
group = []
|
||||
for env, states in matches.items():
|
||||
for sls in states:
|
||||
state = self.client.get_state(sls, env)
|
||||
if state:
|
||||
group.append(state)
|
||||
return group
|
||||
|
||||
def render_state(self, sls, env, mods):
|
||||
'''
|
||||
Render a state file and retrive all of the include states
|
||||
'''
|
||||
errors = []
|
||||
fn_ = self.client.get_state(sls, env)
|
||||
state = None
|
||||
try:
|
||||
state = self.state.compile_template(fn_)
|
||||
except Exception as exc:
|
||||
errors.append('Rendering SLS {0} failed, render error:\n{1}'.format(sls, exc))
|
||||
mods.add(sls)
|
||||
nstate = None
|
||||
if state:
|
||||
if not isinstance(state, dict):
|
||||
errors.append('SLS {0} does not render to a dictonary'.format(sls))
|
||||
else:
|
||||
if state.has_key('include'):
|
||||
for sub_sls in state.pop('include'):
|
||||
if not list(mods).count(sub_sls):
|
||||
nstate, mods, err = self.render_state(sub_sls, env, mods)
|
||||
if nstate:
|
||||
state.update(nstate)
|
||||
if err:
|
||||
errors += err
|
||||
for name in state:
|
||||
if not isinstance(state[name], dict):
|
||||
errors.append('Name {0} in sls {1} is not a dictonary'.format(name, sls))
|
||||
continue
|
||||
if not state[name].has_key('__sls__'):
|
||||
state[name]['__sls__'] = sls
|
||||
return state, mods, errors
|
||||
|
||||
def render_highstate(self, matches):
    '''
    Gather the state files and render them into a single unified salt
    high data structure.

    Arguments:
        matches -- mapping of {env: [sls, ...]} as produced by
                   top_matches

    Returns:
        (highstate, errors) -- the merged high data and a list of any
        render errors that were encountered.
    '''
    highstate = {}
    errors = []
    for env, slses in matches.items():
        rendered = set()
        for sls in slses:
            chunk, rendered, errs = self.render_state(sls, env, rendered)
            if chunk:
                highstate.update(chunk)
            if errs:
                errors.extend(errs)
    return highstate, errors
|
||||
|
||||
def call_highstate(self):
    '''
    Run the full highstate sequence for this minion: fetch the top
    data, find the matching states, render them and execute the
    resulting high data.

    Returns the list of render errors when rendering failed, otherwise
    the result of executing the high data.
    '''
    matches = self.top_matches(self.get_top())
    high, errors = self.render_highstate(matches)
    return errors if errors else self.state.call_high(high)
|
|
@ -1,69 +0,0 @@
|
|||
'''
|
||||
Manage command executions from a state perspective
|
||||
'''
|
||||
|
||||
import os
|
||||
import pwd
|
||||
import grp
|
||||
|
||||
def run(name,
        onlyif=None,
        unless=None,
        cwd='/root',
        user=None,
        group=None):
    '''
    Ensure that the named command is executed

    Arguments:
        name -- The command to run

    Keyword Arguments:
        onlyif -- Only run the main command if this command returns true
        unless -- Only run the main command if this command returns False
        cwd -- Run the command from this directory, defaults to /root
        user -- Run the command as this user
        group -- Run the command as this group
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if onlyif:
        if __salt__['cmd.retcode'](onlyif) != 0:
            ret['comment'] = 'onlyif exec failed'
            ret['result'] = True
            return ret
    if unless:
        if __salt__['cmd.retcode'](unless) == 0:
            ret['comment'] = 'unless executed successfully'
            ret['result'] = True
            return ret
    if not os.path.isdir(cwd):
        ret['comment'] = 'Desired working directory is not available'
        return ret
    # Remember the current effective ids so they can always be restored;
    # the previous version leaked the changed egid when the user lookup
    # failed after the group switch had already happened.
    puid = os.geteuid()
    pgid = os.getegid()
    try:
        if group:
            try:
                egid = grp.getgrnam(group).gr_gid
                os.setegid(egid)
            except KeyError:
                ret['comment'] = 'The group ' + group + ' is not available'
                return ret
        if user:
            try:
                euid = pwd.getpwnam(user).pw_uid
                os.seteuid(euid)
            except KeyError:
                ret['comment'] = 'The user ' + user + ' is not available'
                return ret
        # Wow, we passed the tests, run this sucker!
        cmd_all = __salt__['cmd.run_all'](name, cwd)
        ret['changes'] = cmd_all
        ret['result'] = not bool(cmd_all['retcode'])
        ret['comment'] = 'Command "' + name + '" run'
        return ret
    finally:
        # Restore the uid before the gid: while still running under an
        # unprivileged uid the gid change could be refused.
        os.seteuid(puid)
        os.setegid(pgid)
|
||||
|
259
debian/salt/usr/share/pyshared/salt/states/file.py
vendored
259
debian/salt/usr/share/pyshared/salt/states/file.py
vendored
|
@ -1,259 +0,0 @@
|
|||
'''
|
||||
Manage file states
|
||||
'''
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import difflib
|
||||
import hashlib
|
||||
import traceback
|
||||
|
||||
def _makedirs(path):
|
||||
'''
|
||||
Ensure that the directory containing this path is available.
|
||||
'''
|
||||
if not os.path.isdir(os.path.dirname(path)):
|
||||
os.makedirs(os.path.dirname(path))
|
||||
|
||||
def _is_bin(path):
|
||||
'''
|
||||
Return True if a file is a bin, just checks for NULL char, this should be
|
||||
expanded to reflect how git checks for bins
|
||||
'''
|
||||
if open(path, 'rb').read(2048).count('\0'):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _mako(sfn):
|
||||
'''
|
||||
Render a jinja2 template, returns the location of the rendered file,
|
||||
return False if render fails.
|
||||
Returns:
|
||||
{'result': bool,
|
||||
'data': <Error data or rendered file path>}
|
||||
'''
|
||||
try:
|
||||
from mako.template import Template
|
||||
except ImportError:
|
||||
return {'result': False,
|
||||
'data': 'Failed to import jinja'}
|
||||
try:
|
||||
tgt = tempfile.mkstemp()[1]
|
||||
passthrough = {}
|
||||
passthrough.update(__salt__)
|
||||
passthrough.update(__grains__)
|
||||
template = Template(open(sfn, 'r').read())
|
||||
open(tgt, 'w+').write(template.render(**passthrough))
|
||||
return {'result': True,
|
||||
'data': tgt}
|
||||
except:
|
||||
trb = traceback.format_exc()
|
||||
return {'result': False,
|
||||
'data': trb}
|
||||
|
||||
def _jinja(sfn):
|
||||
'''
|
||||
Render a jinja2 template, returns the location of the rendered file,
|
||||
return False if render fails.
|
||||
Returns:
|
||||
{'result': bool,
|
||||
'data': <Error data or rendered file path>}
|
||||
'''
|
||||
try:
|
||||
from jinja2 import Template
|
||||
except ImportError:
|
||||
return {'result': False,
|
||||
'data': 'Failed to import jinja'}
|
||||
try:
|
||||
tgt = tempfile.mkstemp()[1]
|
||||
passthrough = {}
|
||||
passthrough.update(__salt__)
|
||||
passthrough.update(__grains__)
|
||||
template = Template(open(sfn, 'r').read())
|
||||
open(tgt, 'w+').write(template.render(**passthrough))
|
||||
return {'result': True,
|
||||
'data': tgt}
|
||||
except:
|
||||
trb = traceback.format_exc()
|
||||
return {'result': False,
|
||||
'data': trb}
|
||||
|
||||
def managed(name,
            source,
            user=None,
            group=None,
            mode=None,
            template=None,
            makedirs=False):
    '''
    Manage a given file: pull it from the master when the local copy
    differs, optionally render it as a template, and enforce ownership
    and mode.

    Arguments:
        name -- the location of the managed file
        source -- the salt master path of the source file

    Keyword Arguments:
        user -- the user that should own the file
        group -- the group that should own the file
        mode -- the permission mode for the file (e.g. 644)
        template -- render the source as a template: 'mako' or 'jinja'
        makedirs -- create missing parent directories for a new file
    '''
    # Explicit renderer dispatch; the previous locals().has_key('_' +
    # template) lookup could never find the module level _mako/_jinja
    # helpers, so template rendering was dead code.
    renderers = {'mako': _mako, 'jinja': _jinja}
    if mode:
        mode = str(mode)
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Check changes if the target file exists
    if os.path.isfile(name):
        # Check sums
        source_sum = __salt__['cp.hash_file'](source)
        if not source_sum:
            ret['result'] = False
            ret['comment'] = 'Source file {0} not found'.format(source)
            return ret
        with open(name, 'rb') as fp_:
            name_sum = getattr(
                    hashlib, source_sum['hash_type'])(fp_.read()).hexdigest()
        # Check if file needs to be replaced
        if source_sum['hsum'] != name_sum:
            sfn = __salt__['cp.cache_file'](source)
            if not sfn:
                ret['result'] = False
                ret['comment'] = 'Source file {0} not found'.format(source)
                return ret
            # If the source file is a template render it accordingly
            if template:
                if template in renderers:
                    data = renderers[template](sfn)
                    if data['result']:
                        sfn = data['data']
                    else:
                        ret['result'] = False
                        ret['comment'] = data['data']
                        return ret
            # Check to see if the files are bins
            if _is_bin(sfn) or _is_bin(name):
                ret['changes']['diff'] = 'Replace binary file'
            else:
                with open(sfn, 'rb') as src:
                    slines = src.readlines()
                with open(name, 'rb') as dst:
                    nlines = dst.readlines()
                ret['changes']['diff'] = '\n'.join(
                        difflib.unified_diff(slines, nlines))
            # Prerequisites are met and the file needs to be replaced,
            # do it
            if not __opts__['test']:
                shutil.copy(sfn, name)
        # Check permissions
        perms = {}
        perms['luser'] = __salt__['file.get_user'](name)
        perms['lgroup'] = __salt__['file.get_group'](name)
        perms['lmode'] = __salt__['file.get_mode'](name)
        # Run through the perms and detect and apply the needed changes
        if user:
            if user != perms['luser']:
                perms['cuser'] = user
        if group:
            if group != perms['lgroup']:
                perms['cgroup'] = group
        if 'cuser' in perms or 'cgroup' in perms:
            if not __opts__['test']:
                __salt__['file.chown'](
                        name,
                        user,
                        group
                        )
        if mode:
            if mode != perms['lmode']:
                if not __opts__['test']:
                    __salt__['file.set_mode'](name, mode)
                if mode != __salt__['file.get_mode'](name):
                    ret['result'] = False
                    ret['comment'] += 'Mode not changed '
                else:
                    ret['changes']['mode'] = mode
        if user:
            if user != __salt__['file.get_user'](name):
                ret['result'] = False
                # Append (+=) so an earlier mode failure message is
                # preserved instead of being overwritten.
                ret['comment'] += 'Failed to change user to {0} '.format(user)
            elif 'cuser' in perms:
                ret['changes']['user'] = user
        if group:
            if group != __salt__['file.get_group'](name):
                ret['result'] = False
                ret['comment'] += 'Failed to change group to {0} '.format(
                        group)
            elif 'cgroup' in perms:
                ret['changes']['group'] = group

        if not ret['comment']:
            ret['comment'] = 'File {0} updated'.format(name)

        if __opts__['test']:
            ret['comment'] = 'File {0} not updated'.format(name)
        elif not ret['changes'] and ret['result']:
            ret['comment'] = 'File {0} is in the correct state'.format(name)
        return ret
    else:
        # The file is not currently present, throw it down, log all changes
        sfn = __salt__['cp.cache_file'](source)
        if not sfn:
            ret['result'] = False
            ret['comment'] = 'Source file {0} not found'.format(source)
            return ret
        # Handle any template management that is needed
        if template:
            if template in renderers:
                data = renderers[template](sfn)
                if data['result']:
                    sfn = data['data']
                else:
                    ret['result'] = False
                    return ret
        # It is a new file, set the diff accordingly
        ret['changes']['diff'] = 'New file'
        # Apply the new file
        if not __opts__['test']:
            if makedirs:
                _makedirs(name)
            shutil.copy(sfn, name)
        # Check permissions
        perms = {}
        perms['luser'] = __salt__['file.get_user'](name)
        perms['lgroup'] = __salt__['file.get_group'](name)
        perms['lmode'] = __salt__['file.get_mode'](name)
        # Run through the perms and detect and apply the needed changes to
        # permissions
        if user:
            if user != perms['luser']:
                perms['cuser'] = user
        if group:
            if group != perms['lgroup']:
                perms['cgroup'] = group
        if 'cuser' in perms or 'cgroup' in perms:
            if not __opts__['test']:
                __salt__['file.chown'](
                        name,
                        user,
                        group
                        )
        if mode:
            if mode != perms['lmode']:
                if not __opts__['test']:
                    __salt__['file.set_mode'](name, mode)
                if mode != __salt__['file.get_mode'](name):
                    ret['result'] = False
                    ret['comment'] += 'Mode not changed '
                else:
                    ret['changes']['mode'] = mode
        if user:
            if user != __salt__['file.get_user'](name):
                ret['result'] = False
                ret['comment'] += 'User not changed '
            elif 'cuser' in perms:
                ret['changes']['user'] = user
        if group:
            if group != __salt__['file.get_group'](name):
                ret['result'] = False
                ret['comment'] += 'Group not changed '
            elif 'cgroup' in perms:
                ret['changes']['group'] = group

        if not ret['comment']:
            ret['comment'] = 'File ' + name + ' updated'

        if __opts__['test']:
            ret['comment'] = 'File ' + name + ' not updated'
        elif not ret['changes'] and ret['result']:
            ret['comment'] = 'File ' + name + ' is in the correct state'
        return ret
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
'''
|
||||
State enforcement for groups
|
||||
'''
|
||||
|
||||
def present(name, gid=None):
    '''
    Ensure that a group is present.

    Arguments:
        name -- the name of the group

    Keyword Arguments:
        gid -- the gid the group should carry
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Look for an existing group with this name
    existing = None
    for lgrp in __salt__['group.getent']():
        if lgrp['name'] == name:
            existing = lgrp
            break
    if existing is not None:
        # The group is present; verify the gid when one was requested
        if not gid:
            ret['comment'] = 'Group {0} is already present'.format(name)
            return ret
        if existing['gid'] == gid:
            ret['comment'] = 'No change'
            return ret
        # Wrong gid, try to change it
        ret['result'] = __salt__['group.chgid'](name, gid)
        if ret['result']:
            ret['comment'] = 'Changed gid to {0} for group {1}'.format(
                gid, name
            )
            ret['changes'] = {name: gid}
        else:
            ret['comment'] = 'Failed to change gid to {0} for group {1}'.format(
                gid, name
            )
        return ret
    # Group is not present, make it!
    ret['result'] = __salt__['group.add'](name, gid)
    if ret['result']:
        ret['changes'] = __salt__['group.info'](name)
        ret['comment'] = 'Added group {0}'.format(name)
    else:
        ret['comment'] = 'Failed to apply group {0}'.format(name)
    return ret
|
||||
|
||||
def absent(name):
    '''
    Ensure that the named group is absent.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Only attempt removal when the group actually exists
    if any(lgrp['name'] == name for lgrp in __salt__['group.getent']()):
        # The group is present, DESTROY!!
        ret['result'] = __salt__['group.delete'](name)
        if ret['result']:
            ret['changes'] = {name: ''}
            ret['comment'] = 'Removed group {0}'.format(name)
        else:
            ret['comment'] = 'Failed to remove group {0}'.format(name)
        return ret
    ret['comment'] = 'Group not present'
    return ret
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
'''
|
||||
Manage the state of the hosts file
|
||||
'''
|
||||
|
||||
def present(name, ip):
    '''
    Ensures that the named host is present with the given ip.

    Arguments:
        name -- the hostname to manage
        ip -- the address the hostname should resolve to
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if __salt__['hosts.has_pair'](ip, name):
        # The pair already exists, nothing to change
        ret['result'] = True
        ret['comment'] = 'Host {0} already present'.format(name)
        return ret
    if __salt__['hosts.add_host'](ip, name):
        ret['changes'] = {'host': name}
        ret['result'] = True
        ret['comment'] = 'Added host {0}'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set host'
    return ret
|
||||
|
||||
def absent(name, ip):
    '''
    Ensure that the named host is absent.

    Arguments:
        name -- the hostname to remove
        ip -- the address associated with the hostname
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if not __salt__['hosts.has_pair'](ip, name):
        # Nothing to remove
        ret['result'] = True
        ret['comment'] = 'Host {0} already absent'.format(name)
        return ret
    if __salt__['hosts.rm_host'](ip, name):
        ret['changes'] = {'host': name}
        ret['result'] = True
        ret['comment'] = 'Removed host {0}'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to remove host'
    return ret
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
'''
|
||||
State enforcement for mount points
|
||||
'''
|
||||
|
||||
def mounted(
        name,
        device,
        fstype,
        mkmnt=False,
        opts=None,
        dump=0,
        pass_num=0,
        config='/etc/fstab',
        remount=True,
        persist=True,
        ):
    '''
    Verify that a device is mounted on the named mount point and,
    optionally, persisted in the fstab.

    Arguments:
        name -- the mount point
        device -- the device to mount
        fstype -- the filesystem type

    Keyword Arguments:
        mkmnt -- create the mount point if it does not exist
        opts -- mount options as a list or comma delimited string
                (defaults to ['defaults'])
        dump -- the dump value for the fstab entry
        pass_num -- the pass value for the fstab entry
        config -- the location of the fstab file
        remount -- allow remounting with changed options
        persist -- maintain the mount in the fstab
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Avoid the shared mutable default argument; also accept a comma
    # delimited string for opts.
    if opts is None:
        opts = ['defaults']
    elif isinstance(opts, str):
        opts = opts.split(',')
    # Get the active data
    active = __salt__['mount.active']()
    if name in active:
        # The mount point is mounted!
        # Check to see if it is the right setup
        remnt = False
        if active[name]['device'] != device \
                or active[name]['fstype'] != fstype:
            remnt = True
        # Check the mount options, don't care if the desired behavior is
        # defaults
        if opts != ['defaults']:
            if set(active[name]['opts']) != set(opts):
                remnt = True
        if remnt:
            # The fstype has a remount opt, try it!
            out = __salt__['mount.remount'](name, device, mkmnt, fstype, opts)
            if isinstance(out, str):
                # Failed to remount, the state has failed!
                ret['comment'] = out
                ret['result'] = False
                return ret
            elif out is True:
                # Remount worked!
                ret['changes']['mount'] = True
    else:
        # The mount is not present! Mount it
        out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts)
        if isinstance(out, str):
            # Failed to mount, the state has failed!
            ret['comment'] = out
            ret['result'] = False
            return ret
        elif out is True:
            # Mount worked!
            ret['changes']['mount'] = True
    if persist:
        # present, new, change, bad config
        # Make sure the entry is in the fstab
        out = __salt__['mount.set_fstab'](
                name,
                device,
                fstype,
                opts,
                dump,
                pass_num,
                config)
        if out == 'present':
            return ret
        if out == 'new':
            ret['changes']['persist'] = 'new'
            ret['comment'] += ' and added new entry to the fstab'
            return ret
        if out == 'change':
            ret['changes']['persist'] = 'update'
            ret['comment'] += ' and updated the entry in the fstab'
            return ret
        if out == 'bad config':
            ret['result'] = False
            ret['comment'] += ' but the fstab was not found'
            return ret
    # Previously the function fell off the end (returning None) when
    # persist was False or set_fstab returned an unexpected value;
    # always hand the state dict back to the caller.
    return ret
|
||||
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue