mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
Merge branch '2018.3' into fix-icinga2-cert-path
This commit is contained in:
commit
ce1842e6b1
570 changed files with 24867 additions and 13593 deletions
10
.ci/docs
10
.ci/docs
|
@ -1,8 +1,11 @@
|
|||
pipeline {
|
||||
agent { label 'docs' }
|
||||
agent {
|
||||
label 'docs'
|
||||
}
|
||||
options {
|
||||
timestamps()
|
||||
ansiColor('xterm')
|
||||
timeout(time: 2, unit: 'HOURS')
|
||||
}
|
||||
environment {
|
||||
PYENV_ROOT = "/usr/local/pyenv"
|
||||
|
@ -26,7 +29,7 @@ pipeline {
|
|||
}
|
||||
stage('build') {
|
||||
steps {
|
||||
sh 'eval "$(pyenv init -)"; make -C doc clean html'
|
||||
sh 'eval "$(pyenv init -)"; make SPHINXOPTS="-W" -C doc clean html'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -45,6 +48,9 @@ pipeline {
|
|||
description: 'The docs job has failed',
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/docs"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,73 +1,85 @@
|
|||
pipeline {
|
||||
agent { label 'kitchen-slave' }
|
||||
options {
|
||||
timestamps()
|
||||
ansiColor('xterm')
|
||||
}
|
||||
environment {
|
||||
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
|
||||
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
|
||||
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
|
||||
RBENV_VERSION = "2.4.2"
|
||||
TEST_SUITE = "py2"
|
||||
TEST_PLATFORM = "centos-7"
|
||||
PY_COLORS = 1
|
||||
}
|
||||
stages {
|
||||
stage('github-pending') {
|
||||
steps {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
stage('setup') {
|
||||
steps {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
}
|
||||
stage('run kitchen') {
|
||||
steps {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
timeout(time: 8, unit: 'HOURS') {
|
||||
node('kitchen-slave') {
|
||||
timestamps {
|
||||
ansiColor('xterm') {
|
||||
withEnv([
|
||||
'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
|
||||
'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
|
||||
'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
|
||||
'RBENV_VERSION=2.4.2',
|
||||
'TEST_SUITE=py2',
|
||||
'TEST_PLATFORM=centos-7',
|
||||
'PY_COLORS=1',
|
||||
]) {
|
||||
stage('checkout-scm') {
|
||||
cleanWs notFailBuild: true
|
||||
checkout scm
|
||||
}
|
||||
}}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
try {
|
||||
stage('github-pending') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
stage('setup-bundle') {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
try {
|
||||
stage('run kitchen') {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stage('cleanup kitchen') {
|
||||
script {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
currentBuild.result = 'FAILURE'
|
||||
} finally {
|
||||
try {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
} finally {
|
||||
cleanWs notFailBuild: true
|
||||
def currentResult = currentBuild.result ?: 'SUCCESS'
|
||||
if (currentResult == 'SUCCESS') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
} else {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
cleanWs()
|
||||
}
|
||||
success {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
failure {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,73 +1,85 @@
|
|||
pipeline {
|
||||
agent { label 'kitchen-slave' }
|
||||
options {
|
||||
timestamps()
|
||||
ansiColor('xterm')
|
||||
}
|
||||
environment {
|
||||
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
|
||||
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
|
||||
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
|
||||
RBENV_VERSION = "2.4.2"
|
||||
TEST_SUITE = "py3"
|
||||
TEST_PLATFORM = "centos-7"
|
||||
PY_COLORS = 1
|
||||
}
|
||||
stages {
|
||||
stage('github-pending') {
|
||||
steps {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
stage('setup') {
|
||||
steps {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
}
|
||||
stage('run kitchen') {
|
||||
steps {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
timeout(time: 6, unit: 'HOURS') {
|
||||
node('kitchen-slave') {
|
||||
timestamps {
|
||||
ansiColor('xterm') {
|
||||
withEnv([
|
||||
'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
|
||||
'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
|
||||
'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
|
||||
'RBENV_VERSION=2.4.2',
|
||||
'TEST_SUITE=py3',
|
||||
'TEST_PLATFORM=centos-7',
|
||||
'PY_COLORS=1',
|
||||
]) {
|
||||
stage('checkout-scm') {
|
||||
cleanWs notFailBuild: true
|
||||
checkout scm
|
||||
}
|
||||
}}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
try {
|
||||
stage('github-pending') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
stage('setup-bundle') {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
try {
|
||||
stage('run kitchen') {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stage('cleanup kitchen') {
|
||||
script {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
currentBuild.result = 'FAILURE'
|
||||
} finally {
|
||||
try {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
} finally {
|
||||
cleanWs notFailBuild: true
|
||||
def currentResult = currentBuild.result ?: 'SUCCESS'
|
||||
if (currentResult == 'SUCCESS') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
} else {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
cleanWs()
|
||||
}
|
||||
success {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
failure {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,73 +1,85 @@
|
|||
pipeline {
|
||||
agent { label 'kitchen-slave' }
|
||||
options {
|
||||
timestamps()
|
||||
ansiColor('xterm')
|
||||
}
|
||||
environment {
|
||||
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
|
||||
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
|
||||
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
|
||||
RBENV_VERSION = "2.4.2"
|
||||
TEST_SUITE = "py2"
|
||||
TEST_PLATFORM = "ubuntu-1604"
|
||||
PY_COLORS = 1
|
||||
}
|
||||
stages {
|
||||
stage('github-pending') {
|
||||
steps {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
stage('setup') {
|
||||
steps {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
}
|
||||
stage('run kitchen') {
|
||||
steps {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
timeout(time: 6, unit: 'HOURS') {
|
||||
node('kitchen-slave') {
|
||||
timestamps {
|
||||
ansiColor('xterm') {
|
||||
withEnv([
|
||||
'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
|
||||
'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
|
||||
'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
|
||||
'RBENV_VERSION=2.4.2',
|
||||
'TEST_SUITE=py2',
|
||||
'TEST_PLATFORM=ubuntu-1604',
|
||||
'PY_COLORS=1',
|
||||
]) {
|
||||
stage('checkout-scm') {
|
||||
cleanWs notFailBuild: true
|
||||
checkout scm
|
||||
}
|
||||
}}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
try {
|
||||
stage('github-pending') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
stage('setup-bundle') {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
try {
|
||||
stage('run kitchen') {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stage('cleanup kitchen') {
|
||||
script {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
currentBuild.result = 'FAILURE'
|
||||
} finally {
|
||||
try {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
} finally {
|
||||
cleanWs notFailBuild: true
|
||||
def currentResult = currentBuild.result ?: 'SUCCESS'
|
||||
if (currentResult == 'SUCCESS') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
} else {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
cleanWs()
|
||||
}
|
||||
success {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
failure {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,73 +1,85 @@
|
|||
pipeline {
|
||||
agent { label 'kitchen-slave' }
|
||||
options {
|
||||
timestamps()
|
||||
ansiColor('xterm')
|
||||
}
|
||||
environment {
|
||||
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
|
||||
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
|
||||
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
|
||||
RBENV_VERSION = "2.4.2"
|
||||
TEST_SUITE = "py3"
|
||||
TEST_PLATFORM = "ubuntu-1604"
|
||||
PY_COLORS = 1
|
||||
}
|
||||
stages {
|
||||
stage('github-pending') {
|
||||
steps {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
stage('setup') {
|
||||
steps {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
}
|
||||
stage('run kitchen') {
|
||||
steps {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
timeout(time: 6, unit: 'HOURS') {
|
||||
node('kitchen-slave') {
|
||||
timestamps {
|
||||
ansiColor('xterm') {
|
||||
withEnv([
|
||||
'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
|
||||
'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
|
||||
'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
|
||||
'RBENV_VERSION=2.4.2',
|
||||
'TEST_SUITE=py3',
|
||||
'TEST_PLATFORM=ubuntu-1604',
|
||||
'PY_COLORS=1',
|
||||
]) {
|
||||
stage('checkout-scm') {
|
||||
cleanWs notFailBuild: true
|
||||
checkout scm
|
||||
}
|
||||
}}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
try {
|
||||
stage('github-pending') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
stage('setup-bundle') {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
try {
|
||||
stage('run kitchen') {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stage('cleanup kitchen') {
|
||||
script {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
currentBuild.result = 'FAILURE'
|
||||
} finally {
|
||||
try {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
} finally {
|
||||
cleanWs notFailBuild: true
|
||||
def currentResult = currentBuild.result ?: 'SUCCESS'
|
||||
if (currentResult == 'SUCCESS') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
} else {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
cleanWs()
|
||||
}
|
||||
success {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
failure {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
85
.ci/kitchen-windows2016-py2
Normal file
85
.ci/kitchen-windows2016-py2
Normal file
|
@ -0,0 +1,85 @@
|
|||
timeout(time: 6, unit: 'HOURS') {
|
||||
node('kitchen-slave') {
|
||||
timestamps {
|
||||
ansiColor('xterm') {
|
||||
withEnv([
|
||||
'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
|
||||
'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
|
||||
'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
|
||||
'RBENV_VERSION=2.4.2',
|
||||
'TEST_SUITE=py2',
|
||||
'TEST_PLATFORM=windows-2016',
|
||||
'PY_COLORS=1',
|
||||
]) {
|
||||
stage('checkout-scm') {
|
||||
cleanWs notFailBuild: true
|
||||
checkout scm
|
||||
}
|
||||
try {
|
||||
stage('github-pending') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
stage('setup-bundle') {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
try {
|
||||
stage('run kitchen') {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stage('cleanup kitchen') {
|
||||
script {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
currentBuild.result = 'FAILURE'
|
||||
} finally {
|
||||
try {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
} finally {
|
||||
cleanWs notFailBuild: true
|
||||
def currentResult = currentBuild.result ?: 'SUCCESS'
|
||||
if (currentResult == 'SUCCESS') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
} else {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
85
.ci/kitchen-windows2016-py3
Normal file
85
.ci/kitchen-windows2016-py3
Normal file
|
@ -0,0 +1,85 @@
|
|||
timeout(time: 6, unit: 'HOURS') {
|
||||
node('kitchen-slave') {
|
||||
timestamps {
|
||||
ansiColor('xterm') {
|
||||
withEnv([
|
||||
'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
|
||||
'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
|
||||
'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
|
||||
'RBENV_VERSION=2.4.2',
|
||||
'TEST_SUITE=py3',
|
||||
'TEST_PLATFORM=windows-2016',
|
||||
'PY_COLORS=1',
|
||||
]) {
|
||||
stage('checkout-scm') {
|
||||
cleanWs notFailBuild: true
|
||||
checkout scm
|
||||
}
|
||||
try {
|
||||
stage('github-pending') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
}
|
||||
stage('setup-bundle') {
|
||||
sh 'bundle install --with ec2 windows --without opennebula docker'
|
||||
}
|
||||
try {
|
||||
stage('run kitchen') {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
|
||||
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stage('cleanup kitchen') {
|
||||
script {
|
||||
withCredentials([
|
||||
[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
|
||||
]) {
|
||||
sshagent(credentials: ['jenkins-testing-ssh-key']) {
|
||||
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
|
||||
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
|
||||
}
|
||||
}
|
||||
}
|
||||
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/minion'
|
||||
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
currentBuild.result = 'FAILURE'
|
||||
} finally {
|
||||
try {
|
||||
junit 'artifacts/xml-unittests-output/*.xml'
|
||||
} finally {
|
||||
cleanWs notFailBuild: true
|
||||
def currentResult = currentBuild.result ?: 'SUCCESS'
|
||||
if (currentResult == 'SUCCESS') {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
} else {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
150
.ci/lint
150
.ci/lint
|
@ -3,6 +3,7 @@ pipeline {
|
|||
options {
|
||||
timestamps()
|
||||
ansiColor('xterm')
|
||||
timeout(time: 3, unit: 'HOURS')
|
||||
}
|
||||
environment {
|
||||
PYENV_ROOT = "/usr/local/pyenv"
|
||||
|
@ -13,60 +14,161 @@ pipeline {
|
|||
stage('github-pending') {
|
||||
steps {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: 'Testing lint...',
|
||||
description: 'Python lint on changes begins...',
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/lint"
|
||||
}
|
||||
}
|
||||
stage('setup') {
|
||||
steps {
|
||||
sh 'eval "$(pyenv init -)"; pyenv install 2.7.14 || echo "We already have this python."; pyenv local 2.7.14; pyenv shell 2.7.14'
|
||||
sh 'eval "$(pyenv init -)"; pip install tox'
|
||||
sh '''
|
||||
# Need -M to detect renames otherwise they are reported as Delete and Add, need -C to detect copies, -C includes -M
|
||||
# -M is on by default in git 2.9+
|
||||
git diff --name-status -l99999 -C "origin/$CHANGE_TARGET" > file-list-status.log
|
||||
# the -l increase the search limit, lets use awk so we do not need to repeat the search above.
|
||||
gawk 'BEGIN {FS="\\t"} {if ($1 != "D") {print $NF}}' file-list-status.log > file-list-changed.log
|
||||
gawk 'BEGIN {FS="\\t"} {if ($1 == "D") {print $NF}}' file-list-status.log > file-list-deleted.log
|
||||
(git diff --name-status -l99999 -C "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME";echo "---";git diff --name-status -l99999 -C "origin/$BRANCH_NAME";printenv|grep -E '=[0-9a-z]{40,}+$|COMMIT=|BRANCH') > file-list-experiment.log
|
||||
echo 254 > pylint-salt-chg.exit # assume failure
|
||||
echo 254 > pylint-salt-full.exit # assume failure
|
||||
echo 254 > pylint-tests-chg.exit # assume failure
|
||||
echo 254 > pylint-tests-full.exit # assume failure
|
||||
eval "$(pyenv init -)"
|
||||
pyenv --version
|
||||
pyenv install --skip-existing 2.7.14
|
||||
pyenv local 2.7.14
|
||||
pyenv shell 2.7.14
|
||||
python --version
|
||||
pip install tox
|
||||
'''
|
||||
archiveArtifacts artifacts: 'file-list-status.log,file-list-changed.log,file-list-deleted.log,file-list-experiment.log'
|
||||
}
|
||||
}
|
||||
stage('linting') {
|
||||
failFast false
|
||||
stage('linting chg') {
|
||||
parallel {
|
||||
stage('salt linting') {
|
||||
stage('lint salt chg') {
|
||||
when {
|
||||
expression { return readFile('file-list-changed.log') =~ /(?i)(^|\n)(salt\/.*\.py|setup\.py)\n/ }
|
||||
}
|
||||
steps {
|
||||
sh 'eval "$(pyenv init - --no-rehash)"; tox -e pylint-salt $(find salt/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py {} +) | tee pylint-report.xml'
|
||||
archiveArtifacts artifacts: 'pylint-report.xml'
|
||||
sh '''
|
||||
eval "$(pyenv init - --no-rehash)"
|
||||
# tee makes the exit/return code always 0
|
||||
grep -Ei '^salt/.*\\.py$|^setup\\.py$' file-list-changed.log | (xargs -r '--delimiter=\\n' tox -e pylint-salt ; echo "$?" > pylint-salt-chg.exit) | tee pylint-report-salt-chg.log
|
||||
# remove color escape coding
|
||||
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-salt-chg.log
|
||||
read rc_exit < pylint-salt-chg.exit
|
||||
exit "$rc_exit"
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('test linting') {
|
||||
steps {
|
||||
sh 'eval "$(pyenv init - --no-rehash)"; tox -e pylint-tests $(find tests/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" {} +) | tee pylint-report-tests.xml'
|
||||
archiveArtifacts artifacts: 'pylint-report-tests.xml'
|
||||
stage('lint test chg') {
|
||||
when {
|
||||
expression { return readFile('file-list-changed.log') =~ /(?i)(^|\n)tests\/.*\.py\n/ }
|
||||
}
|
||||
steps {
|
||||
sh '''
|
||||
eval "$(pyenv init - --no-rehash)"
|
||||
# tee makes the exit/return code always 0
|
||||
grep -Ei '^tests/.*\\.py$' file-list-changed.log | (xargs -r '--delimiter=\\n' tox -e pylint-tests ; echo "$?" > pylint-tests-chg.exit) | tee pylint-report-tests-chg.log
|
||||
# remove color escape coding
|
||||
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-tests-chg.log
|
||||
read rc_exit < pylint-tests-chg.exit
|
||||
exit "$rc_exit"
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
archiveArtifacts artifacts: 'pylint-report-*-chg.log', allowEmptyArchive: true
|
||||
step([$class: 'WarningsPublisher',
|
||||
parserConfigurations: [[
|
||||
parserName: 'PyLint',
|
||||
pattern: 'pylint-report-*-chg.log'
|
||||
]],
|
||||
failedTotalAll: '0',
|
||||
useDeltaValues: false,
|
||||
canRunOnFailed: true,
|
||||
usePreviousBuildAsReference: true
|
||||
])
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('linting all') {
|
||||
// perform a full linit if this is a merge forward and the change only lint passed.
|
||||
when {
|
||||
expression { return params.CHANGE_BRANCH =~ /(?i)^merge[._-]/ }
|
||||
}
|
||||
parallel {
|
||||
stage('setup full') {
|
||||
steps {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: 'Python lint on everything begins...',
|
||||
status: 'PENDING',
|
||||
context: "jenkins/pr/lint"
|
||||
}
|
||||
}
|
||||
stage('lint salt full') {
|
||||
steps {
|
||||
sh '''
|
||||
eval "$(pyenv init - --no-rehash)"
|
||||
(tox -e pylint-salt ; echo "$?" > pylint-salt-full.exit) | tee pylint-report-salt-full.log
|
||||
# remove color escape coding
|
||||
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-salt-full.log
|
||||
read rc_exit < pylint-salt-full.exit
|
||||
exit "$rc_exit"
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('lint test full') {
|
||||
steps {
|
||||
sh '''
|
||||
eval "$(pyenv init - --no-rehash)"
|
||||
(tox -e pylint-tests ; echo "$?" > pylint-tests-full.exit) | tee pylint-report-tests-full.log
|
||||
# remove color escape coding
|
||||
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-tests-full.log
|
||||
read rc_exit < pylint-tests-full.exit
|
||||
exit "$rc_exit"
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
archiveArtifacts artifacts: 'pylint-report-*-full.log', allowEmptyArchive: true
|
||||
step([$class: 'WarningsPublisher',
|
||||
parserConfigurations: [[
|
||||
parserName: 'PyLint',
|
||||
pattern: 'pylint-report-*-full.log'
|
||||
]],
|
||||
failedTotalAll: '0',
|
||||
useDeltaValues: false,
|
||||
canRunOnFailed: true,
|
||||
usePreviousBuildAsReference: true
|
||||
])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
step([$class: 'WarningsPublisher',
|
||||
parserConfigurations: [[
|
||||
parserName: 'PyLint',
|
||||
pattern: 'pylint-report*.xml'
|
||||
]],
|
||||
failedTotalAll: '0',
|
||||
useDeltaValues: false,
|
||||
canRunOnFailed: true,
|
||||
usePreviousBuildAsReference: true
|
||||
])
|
||||
cleanWs()
|
||||
}
|
||||
success {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: 'The lint job has passed',
|
||||
description: 'Python lint test has passed',
|
||||
status: 'SUCCESS',
|
||||
context: "jenkins/pr/lint"
|
||||
}
|
||||
failure {
|
||||
githubNotify credentialsId: 'test-jenkins-credentials',
|
||||
description: 'The lint job has failed',
|
||||
description: 'Python lint test has failed',
|
||||
status: 'FAILURE',
|
||||
context: "jenkins/pr/lint"
|
||||
slackSend channel: "#jenkins-prod-pr",
|
||||
color: '#FF0000',
|
||||
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
38
.github/CODEOWNERS
vendored
38
.github/CODEOWNERS
vendored
|
@ -1,20 +1,19 @@
|
|||
# SALTSTACK CODE OWNERS
|
||||
|
||||
# See https://help.github.com/articles/about-codeowners/
|
||||
# for more info about CODEOWNERS file
|
||||
|
||||
# Lines starting with '#' are comments.
|
||||
# Each line is a file pattern followed by one or more owners.
|
||||
|
||||
# See https://help.github.com/articles/about-codeowners/
|
||||
# for more info about the CODEOWNERS file
|
||||
|
||||
# Lines starting with '#' are comments.
|
||||
# Each line is a file pattern followed by one or more owners.
|
||||
|
||||
# This file uses an fnmatch-style matching pattern.
|
||||
|
||||
# Team Boto
|
||||
salt/**/*boto* @saltstack/team-boto
|
||||
salt/*/*boto* @saltstack/team-boto
|
||||
|
||||
# Team Core
|
||||
requirements/* @saltstack/team-core
|
||||
rfcs/* @saltstack/team-core
|
||||
salt/auth/* @saltstack/team-core
|
||||
salt/cache/* @saltstack/team-core
|
||||
salt/cli/* @saltstack/team-core
|
||||
|
@ -24,14 +23,16 @@ salt/daemons/* @saltstack/team-core
|
|||
salt/pillar/* @saltstack/team-core
|
||||
salt/loader.py @saltstack/team-core
|
||||
salt/payload.py @saltstack/team-core
|
||||
salt/**/master* @saltstack/team-core
|
||||
salt/**/minion* @saltstack/team-core
|
||||
salt/master.py @saltstack/team-core
|
||||
salt/*/master* @saltstack/team-core
|
||||
salt/minion.py @saltstack/team-core
|
||||
salt/*/minion* @saltstack/team-core
|
||||
|
||||
# Team Cloud
|
||||
salt/cloud/* @saltstack/team-cloud
|
||||
salt/utils/openstack/* @saltstack/team-cloud
|
||||
salt/utils/aws.py @saltstack/team-cloud
|
||||
salt/**/*cloud* @saltstack/team-cloud
|
||||
salt/*/*cloud* @saltstack/team-cloud
|
||||
|
||||
# Team NetAPI
|
||||
salt/cli/api.py @saltstack/team-netapi
|
||||
|
@ -50,18 +51,18 @@ salt/cli/ssh.py @saltstack/team-ssh
|
|||
salt/client/ssh/* @saltstack/team-ssh
|
||||
salt/roster/* @saltstack/team-ssh
|
||||
salt/runners/ssh.py @saltstack/team-ssh
|
||||
salt/**/thin.py @saltstack/team-ssh
|
||||
salt/*/thin.py @saltstack/team-ssh
|
||||
|
||||
# Team State
|
||||
salt/state.py @saltstack/team-state
|
||||
|
||||
# Team SUSE
|
||||
salt/**/*btrfs* @saltstack/team-suse
|
||||
salt/**/*kubernetes* @saltstack/team-suse
|
||||
salt/**/*pkg* @saltstack/team-suse
|
||||
salt/**/*snapper* @saltstack/team-suse
|
||||
salt/**/*xfs* @saltstack/team-suse
|
||||
salt/**/*zypper* @saltstack/team-suse
|
||||
salt/*/*btrfs* @saltstack/team-suse
|
||||
salt/*/*kubernetes* @saltstack/team-suse
|
||||
salt/*/*pkg* @saltstack/team-suse
|
||||
salt/*/*snapper* @saltstack/team-suse
|
||||
salt/*/*xfs* @saltstack/team-suse
|
||||
salt/*/*zypper* @saltstack/team-suse
|
||||
|
||||
# Team Transport
|
||||
salt/transport/* @saltstack/team-transport
|
||||
|
@ -73,3 +74,6 @@ salt/modules/reg.py @saltstack/team-windows
|
|||
salt/states/reg.py @saltstack/team-windows
|
||||
tests/*/*win* @saltstack/team-windows
|
||||
tests/*/test_reg.py @saltstack/team-windows
|
||||
|
||||
# Jenkins Integration
|
||||
.ci/* @saltstack/saltstack-sre-team @saltstack/team-core
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
<% vagrant = system('gem list -i kitchen-vagrant 2>/dev/null >/dev/null') %>
|
||||
<% version = '2017.7.6' %>
|
||||
<% version = '2018.3.3' %>
|
||||
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
|
||||
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
|
||||
<% verifierfile = ENV['SALT_KITCHEN_VERIFIER'] || '.kitchen/verifier.yml' %>
|
||||
|
@ -30,7 +30,7 @@ provisioner:
|
|||
salt_install: bootstrap
|
||||
salt_version: latest
|
||||
salt_bootstrap_url: https://bootstrap.saltstack.com
|
||||
salt_bootstrap_options: -X -p rsync stable <%= version %>
|
||||
salt_bootstrap_options: -X -p rsync git <%= version %>
|
||||
log_level: info
|
||||
sudo: true
|
||||
require_chef: false
|
||||
|
@ -223,6 +223,7 @@ verifier:
|
|||
sudo: true
|
||||
run_destructive: true
|
||||
transport: zeromq
|
||||
enable_filenames: true
|
||||
types:
|
||||
- ssh
|
||||
xml: /tmp/xml-unittests-output/
|
||||
|
|
13
CONTRIBUTING.rst
Normal file
13
CONTRIBUTING.rst
Normal file
|
@ -0,0 +1,13 @@
|
|||
Developing Salt
|
||||
===============
|
||||
|
||||
The Salt development team is welcoming, positive, and dedicated to
|
||||
helping people get new code and fixes into SaltStack projects. Log into
|
||||
GitHub and get started with one of the largest developer communities in
|
||||
the world. The following links should get you started:
|
||||
|
||||
`<https://github.com/saltstack>`_
|
||||
|
||||
`<https://docs.saltstack.com/en/latest/topics/development/index.html>`_
|
||||
|
||||
`<https://docs.saltstack.com/en/develop/topics/development/pull_requests.html>`_
|
12
Gemfile
12
Gemfile
|
@ -2,7 +2,8 @@
|
|||
|
||||
source 'https://rubygems.org'
|
||||
|
||||
gem 'test-kitchen', '~>1.21'
|
||||
# Point this back at the test-kitchen package after 1.23.3 is relased
|
||||
gem 'test-kitchen', :git => 'https://github.com/dwoz/test-kitchen.git', :branch => 'winrm_opts'
|
||||
gem 'kitchen-salt', '~>0.2'
|
||||
gem 'kitchen-sync'
|
||||
gem 'git'
|
||||
|
@ -12,12 +13,15 @@ group :docker do
|
|||
end
|
||||
|
||||
group :windows do
|
||||
gem 'vagrant-wrapper'
|
||||
gem 'kitchen-vagrant'
|
||||
gem 'winrm', '~>2.0'
|
||||
gem 'winrm-fs', '~>1.2.1'
|
||||
gem 'winrm-fs', '~>1.3.1'
|
||||
end
|
||||
|
||||
group :ec2 do
|
||||
gem 'kitchen-ec2'
|
||||
end
|
||||
|
||||
group :vagrant do
|
||||
gem 'vagrant-wrapper'
|
||||
gem 'kitchen-vagrant'
|
||||
end
|
||||
|
|
2
LICENSE
2
LICENSE
|
@ -1,6 +1,6 @@
|
|||
Salt - Remote execution system
|
||||
|
||||
Copyright 2014-2016 SaltStack Team
|
||||
Copyright 2014-2019 SaltStack Team
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
|
5
NOTICE
Normal file
5
NOTICE
Normal file
|
@ -0,0 +1,5 @@
|
|||
Apache SaltStack
|
||||
Copyright 2014-2019 The Apache Software Foundation
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
47
README.rst
47
README.rst
|
@ -34,39 +34,6 @@ documentation.
|
|||
|
||||
`<https://docs.saltstack.com/en/latest/>`_
|
||||
|
||||
Get SaltStack Support and Help
|
||||
==============================
|
||||
|
||||
**IRC Chat** - Join the vibrant, helpful and positive SaltStack chat room in
|
||||
Freenode at #salt. There is no need to introduce yourself, or ask permission to
|
||||
join in, just help and be helped! Make sure to wait for an answer, sometimes it
|
||||
may take a few moments for someone to reply.
|
||||
|
||||
`<http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83>`_
|
||||
|
||||
**SaltStack Slack** - Alongside IRC is our SaltStack Community Slack for the
|
||||
SaltStack Working groups. Use the following link to request an invitation.
|
||||
|
||||
`<https://saltstackcommunity.herokuapp.com/>`_
|
||||
|
||||
**Mailing List** - The SaltStack community users mailing list is hosted by
|
||||
Google groups. Anyone can post to ask questions about SaltStack products and
|
||||
anyone can help answer. Join the conversation!
|
||||
|
||||
`<https://groups.google.com/forum/#!forum/salt-users>`_
|
||||
|
||||
You may subscribe to the list without a Google account by emailing
|
||||
salt-users+subscribe@googlegroups.com and you may post to the list by emailing
|
||||
salt-users@googlegroups.com
|
||||
|
||||
**Reporting Issues** - To report an issue with Salt, please follow the
|
||||
guidelines for filing bug reports:
|
||||
`<https://docs.saltstack.com/en/develop/topics/development/reporting_bugs.html>`_
|
||||
|
||||
**SaltStack Support** - If you need dedicated, prioritized support, please
|
||||
consider a SaltStack Support package that fits your needs:
|
||||
`<http://www.saltstack.com/support>`_
|
||||
|
||||
Engage SaltStack
|
||||
================
|
||||
|
||||
|
@ -100,17 +67,3 @@ services`_ offerings.
|
|||
.. _SaltStack education offerings: http://saltstack.com/training/
|
||||
.. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/
|
||||
.. _SaltStack professional services: http://saltstack.com/services/
|
||||
|
||||
Developing Salt
|
||||
===============
|
||||
|
||||
The Salt development team is welcoming, positive, and dedicated to
|
||||
helping people get new code and fixes into SaltStack projects. Log into
|
||||
GitHub and get started with one of the largest developer communities in
|
||||
the world. The following links should get you started:
|
||||
|
||||
`<https://github.com/saltstack>`_
|
||||
|
||||
`<https://docs.saltstack.com/en/latest/topics/development/index.html>`_
|
||||
|
||||
`<https://docs.saltstack.com/en/develop/topics/development/pull_requests.html>`_
|
||||
|
|
33
SUPPORT.rst
Normal file
33
SUPPORT.rst
Normal file
|
@ -0,0 +1,33 @@
|
|||
Get SaltStack Support and Help
|
||||
==============================
|
||||
|
||||
**IRC Chat** - Join the vibrant, helpful and positive SaltStack chat room in
|
||||
Freenode at #salt. There is no need to introduce yourself, or ask permission to
|
||||
join in, just help and be helped! Make sure to wait for an answer, sometimes it
|
||||
may take a few moments for someone to reply.
|
||||
|
||||
`<http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83>`_
|
||||
|
||||
**SaltStack Slack** - Alongside IRC is our SaltStack Community Slack for the
|
||||
SaltStack Working groups. Use the following link to request an invitation.
|
||||
|
||||
`<https://saltstackcommunity.herokuapp.com/>`_
|
||||
|
||||
**Mailing List** - The SaltStack community users mailing list is hosted by
|
||||
Google groups. Anyone can post to ask questions about SaltStack products and
|
||||
anyone can help answer. Join the conversation!
|
||||
|
||||
`<https://groups.google.com/forum/#!forum/salt-users>`_
|
||||
|
||||
You may subscribe to the list without a Google account by emailing
|
||||
salt-users+subscribe@googlegroups.com and you may post to the list by emailing
|
||||
salt-users@googlegroups.com
|
||||
|
||||
**Reporting Issues** - To report an issue with Salt, please follow the
|
||||
guidelines for filing bug reports:
|
||||
`<https://docs.saltstack.com/en/develop/topics/development/reporting_bugs.html>`_
|
||||
|
||||
**SaltStack Support** - If you need dedicated, prioritized support, please
|
||||
consider a SaltStack Support package that fits your needs:
|
||||
`<http://www.saltstack.com/support>`_
|
||||
|
24
conf/master
24
conf/master
|
@ -261,24 +261,6 @@
|
|||
# The publisher interface ZeroMQPubServerChannel
|
||||
#pub_hwm: 1000
|
||||
|
||||
# These two ZMQ HWM settings, salt_event_pub_hwm and event_publisher_pub_hwm
|
||||
# are significant for masters with thousands of minions. When these are
|
||||
# insufficiently high it will manifest in random responses missing in the CLI
|
||||
# and even missing from the job cache. Masters that have fast CPUs and many
|
||||
# cores with appropriate worker_threads will not need these set as high.
|
||||
|
||||
# On deployment with 8,000 minions, 2.4GHz CPUs, 24 cores, 32GiB memory has
|
||||
# these settings:
|
||||
#
|
||||
# salt_event_pub_hwm: 128000
|
||||
# event_publisher_pub_hwm: 64000
|
||||
|
||||
# ZMQ high-water-mark for SaltEvent pub socket
|
||||
#salt_event_pub_hwm: 20000
|
||||
|
||||
# ZMQ high-water-mark for EventPublisher pub socket
|
||||
#event_publisher_pub_hwm: 10000
|
||||
|
||||
# The master may allocate memory per-event and not
|
||||
# reclaim it.
|
||||
# To set a high-water mark for memory allocation, use
|
||||
|
@ -574,12 +556,6 @@
|
|||
#
|
||||
#master_tops: {}
|
||||
|
||||
# The external_nodes option allows Salt to gather data that would normally be
|
||||
# placed in a top file. The external_nodes option is the executable that will
|
||||
# return the ENC data. Remember that Salt will look for external nodes AND top
|
||||
# files and combine the results if both are enabled!
|
||||
#external_nodes: None
|
||||
|
||||
# The renderer to use on the minions to render the state data
|
||||
#renderer: yaml_jinja
|
||||
|
||||
|
|
10
conf/minion
10
conf/minion
|
@ -66,6 +66,11 @@
|
|||
# Set to zero if the minion should shutdown and not retry.
|
||||
# retry_dns: 30
|
||||
|
||||
# Set the number of times to attempt to resolve
|
||||
# the master hostname if name resolution fails. Defaults to None,
|
||||
# which will attempt the resolution indefinitely.
|
||||
# retry_dns_count: 3
|
||||
|
||||
# Set the port used by the master reply and authentication server.
|
||||
#master_port: 4506
|
||||
|
||||
|
@ -148,6 +153,11 @@
|
|||
# Set the directory used to hold unix sockets.
|
||||
#sock_dir: /var/run/salt/minion
|
||||
|
||||
# The minion can take a while to start up when lspci and/or dmidecode is used
|
||||
# to populate the grains for the minion. Set this to False if you do not need
|
||||
# GPU hardware grains for your minion.
|
||||
# enable_gpu_grains: True
|
||||
|
||||
# Set the default outputter used by the salt-call command. The default is
|
||||
# "nested".
|
||||
#output: nested
|
||||
|
|
|
@ -263,24 +263,6 @@ syndic_user: salt
|
|||
# The publisher interface ZeroMQPubServerChannel
|
||||
#pub_hwm: 1000
|
||||
|
||||
# These two ZMQ HWM settings, salt_event_pub_hwm and event_publisher_pub_hwm
|
||||
# are significant for masters with thousands of minions. When these are
|
||||
# insufficiently high it will manifest in random responses missing in the CLI
|
||||
# and even missing from the job cache. Masters that have fast CPUs and many
|
||||
# cores with appropriate worker_threads will not need these set as high.
|
||||
|
||||
# On deployment with 8,000 minions, 2.4GHz CPUs, 24 cores, 32GiB memory has
|
||||
# these settings:
|
||||
#
|
||||
# salt_event_pub_hwm: 128000
|
||||
# event_publisher_pub_hwm: 64000
|
||||
|
||||
# ZMQ high-water-mark for SaltEvent pub socket
|
||||
#salt_event_pub_hwm: 20000
|
||||
|
||||
# ZMQ high-water-mark for EventPublisher pub socket
|
||||
#event_publisher_pub_hwm: 10000
|
||||
|
||||
# The master may allocate memory per-event and not
|
||||
# reclaim it.
|
||||
# To set a high-water mark for memory allocation, use
|
||||
|
@ -536,12 +518,6 @@ syndic_user: salt
|
|||
#
|
||||
#master_tops: {}
|
||||
|
||||
# The external_nodes option allows Salt to gather data that would normally be
|
||||
# placed in a top file. The external_nodes option is the executable that will
|
||||
# return the ENC data. Remember that Salt will look for external nodes AND top
|
||||
# files and combine the results if both are enabled!
|
||||
#external_nodes: None
|
||||
|
||||
# The renderer to use on the minions to render the state data
|
||||
#renderer: yaml_jinja
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@ from docutils.parsers.rst import Directive
|
|||
|
||||
from docutils.statemachine import ViewList
|
||||
from sphinx import addnodes
|
||||
from sphinx.directives import ObjectDescription, Directive
|
||||
from sphinx.directives import ObjectDescription
|
||||
from sphinx.domains import Domain, ObjType
|
||||
from sphinx.domains.python import PyObject
|
||||
from sphinx.locale import l_, _
|
||||
|
|
2
doc/_themes/saltstack2/layout.html
vendored
2
doc/_themes/saltstack2/layout.html
vendored
|
@ -256,7 +256,7 @@
|
|||
<!--
|
||||
<a href="https://saltstack.com/saltstack-enterprise/" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/enterprise_ad.jpg', 1) }}"/></a>
|
||||
-->
|
||||
<a href="http://saltconf.com" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.jpg', 1) }}"/></a>
|
||||
<a href="http://saltconf.com/saltconf18-speakers/" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.png', 1) }}"/></a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
|
BIN
doc/_themes/saltstack2/static/images/DOCBANNER.jpg
vendored
BIN
doc/_themes/saltstack2/static/images/DOCBANNER.jpg
vendored
Binary file not shown.
Before Width: | Height: | Size: 497 KiB |
BIN
doc/_themes/saltstack2/static/images/DOCBANNER.png
vendored
Normal file
BIN
doc/_themes/saltstack2/static/images/DOCBANNER.png
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 767 KiB |
25
doc/conf.py
25
doc/conf.py
|
@ -65,6 +65,7 @@ MOCK_MODULES = [
|
|||
'user',
|
||||
|
||||
# salt core
|
||||
'concurrent',
|
||||
'Crypto',
|
||||
'Crypto.Signature',
|
||||
'Crypto.Cipher',
|
||||
|
@ -250,8 +251,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
|
|||
project = 'Salt'
|
||||
|
||||
version = salt.version.__version__
|
||||
latest_release = '2018.3.2' # latest release
|
||||
previous_release = '2017.7.7' # latest release from previous branch
|
||||
latest_release = '2018.3.3' # latest release
|
||||
previous_release = '2017.7.8' # latest release from previous branch
|
||||
previous_release_dir = '2017.7' # path on web server for previous branch
|
||||
next_release = '' # next release
|
||||
next_release_dir = '' # path on web server for next release branch
|
||||
|
@ -270,15 +271,15 @@ release = latest_release # version, latest_release, previous_release
|
|||
# Set google custom search engine
|
||||
|
||||
if release == latest_release:
|
||||
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
|
||||
elif release.startswith('2014.7'):
|
||||
search_cx = '004624818632696854117:thhslradbru' # 2014.7
|
||||
elif release.startswith('2015.5'):
|
||||
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
|
||||
elif release.startswith('2015.8'):
|
||||
search_cx = '004624818632696854117:aw_tegffouy' # 2015.8
|
||||
search_cx = '011515552685726825874:ht0p8miksrm' # latest
|
||||
elif release.startswith('2018.3'):
|
||||
search_cx = '011515552685726825874:vadptdpvyyu' # 2018.3
|
||||
elif release.startswith('2017.7'):
|
||||
search_cx = '011515552685726825874:w-hxmnbcpou' # 2017.7
|
||||
elif release.startswith('2016.11'):
|
||||
search_cx = '011515552685726825874:dlsj745pvhq' # 2016.11
|
||||
else:
|
||||
search_cx = '004624818632696854117:haj7bjntf4s' # develop
|
||||
search_cx = '011515552685726825874:x17j5zl74g8' # develop
|
||||
|
||||
needs_sphinx = '1.3'
|
||||
|
||||
|
@ -301,8 +302,8 @@ extensions = [
|
|||
'sphinx.ext.intersphinx',
|
||||
'httpdomain',
|
||||
'youtube',
|
||||
'saltautodoc', # Must be AFTER autodoc
|
||||
'shorturls',
|
||||
#'saltautodoc', # Must be AFTER autodoc
|
||||
#'shorturls',
|
||||
]
|
||||
|
||||
try:
|
||||
|
|
|
@ -7182,25 +7182,6 @@ master_tops:
|
|||
.fi
|
||||
.UNINDENT
|
||||
.UNINDENT
|
||||
.SS \fBexternal_nodes\fP
|
||||
.sp
|
||||
Default: None
|
||||
.sp
|
||||
The external_nodes option allows Salt to gather data that would normally be
|
||||
placed in a top file from and external node controller. The external_nodes
|
||||
option is the executable that will return the ENC data. Remember that Salt
|
||||
will look for external nodes AND top files and combine the results if both
|
||||
are enabled and available!
|
||||
.INDENT 0.0
|
||||
.INDENT 3.5
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
external_nodes: cobbler\-ext\-nodes
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
.UNINDENT
|
||||
.SS \fBrenderer\fP
|
||||
.sp
|
||||
Default: \fByaml_jinja\fP
|
||||
|
@ -16006,12 +15987,6 @@ and \fBmine_functions\fP\&.
|
|||
#
|
||||
#master_tops: {}
|
||||
|
||||
# The external_nodes option allows Salt to gather data that would normally be
|
||||
# placed in a top file. The external_nodes option is the executable that will
|
||||
# return the ENC data. Remember that Salt will look for external nodes AND top
|
||||
# files and combine the results if both are enabled!
|
||||
#external_nodes: None
|
||||
|
||||
# The renderer to use on the minions to render the state data
|
||||
#renderer: yaml_jinja
|
||||
|
||||
|
|
|
@ -92,13 +92,13 @@ RunnerClient
|
|||
------------
|
||||
|
||||
.. autoclass:: salt.runner.RunnerClient
|
||||
:members: cmd, async, cmd_sync, cmd_async
|
||||
:members: cmd, asynchronous, cmd_sync, cmd_async
|
||||
|
||||
WheelClient
|
||||
-----------
|
||||
|
||||
.. autoclass:: salt.wheel.WheelClient
|
||||
:members: cmd, async, cmd_sync, cmd_async
|
||||
:members: cmd, asynchronous, cmd_sync, cmd_async
|
||||
|
||||
CloudClient
|
||||
-----------
|
||||
|
|
|
@ -472,12 +472,16 @@ communication.
|
|||
``enable_gpu_grains``
|
||||
---------------------
|
||||
|
||||
Default: ``True``
|
||||
Default: ``False``
|
||||
|
||||
Enable GPU hardware data for your master. Be aware that the master can
|
||||
take a while to start up when lspci and/or dmidecode is used to populate the
|
||||
grains for the master.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
enable_gpu_grains: True
|
||||
|
||||
.. conf_master:: job_cache
|
||||
|
||||
``job_cache``
|
||||
|
@ -1843,40 +1847,6 @@ The listen queue size of the ZeroMQ backlog.
|
|||
|
||||
zmq_backlog: 1000
|
||||
|
||||
.. conf_master:: salt_event_pub_hwm
|
||||
.. conf_master:: event_publisher_pub_hwm
|
||||
|
||||
``salt_event_pub_hwm`` and ``event_publisher_pub_hwm``
|
||||
------------------------------------------------------
|
||||
|
||||
These two ZeroMQ High Water Mark settings, ``salt_event_pub_hwm`` and
|
||||
``event_publisher_pub_hwm`` are significant for masters with thousands of
|
||||
minions. When these are insufficiently high it will manifest in random
|
||||
responses missing in the CLI and even missing from the job cache. Masters
|
||||
that have fast CPUs and many cores with appropriate ``worker_threads``
|
||||
will not need these set as high.
|
||||
|
||||
The ZeroMQ high-water-mark for the ``SaltEvent`` pub socket default is:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
salt_event_pub_hwm: 20000
|
||||
|
||||
The ZeroMQ high-water-mark for the ``EventPublisher`` pub socket default is:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
event_publisher_pub_hwm: 10000
|
||||
|
||||
As an example, on single master deployment with 8,000 minions, 2.4GHz CPUs,
|
||||
24 cores, and 32GiB memory has these settings:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
salt_event_pub_hwm: 128000
|
||||
event_publisher_pub_hwm: 64000
|
||||
|
||||
|
||||
.. _master-module-management:
|
||||
|
||||
Master Module Management
|
||||
|
@ -2059,23 +2029,6 @@ following configuration:
|
|||
master_tops:
|
||||
ext_nodes: <Shell command which returns yaml>
|
||||
|
||||
.. conf_master:: external_nodes
|
||||
|
||||
``external_nodes``
|
||||
------------------
|
||||
|
||||
Default: None
|
||||
|
||||
The external_nodes option allows Salt to gather data that would normally be
|
||||
placed in a top file from and external node controller. The external_nodes
|
||||
option is the executable that will return the ENC data. Remember that Salt
|
||||
will look for external nodes AND top files and combine the results if both
|
||||
are enabled and available!
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
external_nodes: cobbler-ext-nodes
|
||||
|
||||
.. conf_master:: renderer
|
||||
|
||||
``renderer``
|
||||
|
@ -2449,6 +2402,12 @@ Master will not be returned to the Minion.
|
|||
------------------------------
|
||||
|
||||
.. versionadded:: 2014.1.0
|
||||
.. deprecated:: 2018.3.4
|
||||
This option is now ignored. Firstly, it only traversed
|
||||
:conf_master:`file_roots`, which means it did not work for the other
|
||||
fileserver backends. Secondly, since this option was added we have added
|
||||
caching to the code that traverses the file_roots (and gitfs, etc.), which
|
||||
greatly reduces the amount of traversal that is done.
|
||||
|
||||
Default: ``False``
|
||||
|
||||
|
|
|
@ -307,6 +307,23 @@ Set to zero if the minion should shutdown and not retry.
|
|||
|
||||
retry_dns: 30
|
||||
|
||||
.. conf_minion:: retry_dns_count
|
||||
|
||||
``retry_dns_count``
|
||||
-------------------
|
||||
|
||||
.. versionadded:: 2018.3.4
|
||||
|
||||
Default: ``None``
|
||||
|
||||
Set the number of attempts to perform when resolving
|
||||
the master hostname if name resolution fails.
|
||||
By default the minion will retry indefinitely.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
retry_dns_count: 3
|
||||
|
||||
.. conf_minion:: master_port
|
||||
|
||||
``master_port``
|
||||
|
@ -892,6 +909,22 @@ The directory where Unix sockets will be kept.
|
|||
|
||||
sock_dir: /var/run/salt/minion
|
||||
|
||||
.. conf_minion:: enable_gpu_grains
|
||||
|
||||
``enable_gpu_grains``
|
||||
---------------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
Enable GPU hardware data for your master. Be aware that the minion can
|
||||
take a while to start up when lspci and/or dmidecode is used to populate the
|
||||
grains for the minion, so this can be set to ``False`` if you do not need these
|
||||
grains.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
enable_gpu_grains: False
|
||||
|
||||
.. conf_minion:: outputter_dirs
|
||||
|
||||
``outputter_dirs``
|
||||
|
|
|
@ -16,7 +16,7 @@ engine modules
|
|||
ircbot
|
||||
junos_syslog
|
||||
logentries
|
||||
logstash
|
||||
logstash_engine
|
||||
napalm_syslog
|
||||
reactor
|
||||
redis_sentinel
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
=====================
|
||||
salt.engines.logstash
|
||||
=====================
|
||||
|
||||
.. automodule:: salt.engines.logstash
|
||||
:members:
|
6
doc/ref/engines/all/salt.engines.logstash_engine.rst
Normal file
6
doc/ref/engines/all/salt.engines.logstash_engine.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
============================
|
||||
salt.engines.logstash_engine
|
||||
============================
|
||||
|
||||
.. automodule:: salt.engines.logstash_engine
|
||||
:members:
|
|
@ -25,6 +25,7 @@ execution modules
|
|||
aix_group
|
||||
aliases
|
||||
alternatives
|
||||
ansiblegate
|
||||
apache
|
||||
apcups
|
||||
apf
|
||||
|
|
6
doc/ref/modules/all/salt.modules.ansiblegate.rst
Normal file
6
doc/ref/modules/all/salt.modules.ansiblegate.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
========================
|
||||
salt.modules.ansiblegate
|
||||
========================
|
||||
|
||||
.. automodule:: salt.modules.ansiblegate
|
||||
:members:
|
|
@ -137,7 +137,7 @@ call functions available in other execution modules.
|
|||
The variable ``__salt__`` is packed into the modules after they are loaded into
|
||||
the Salt minion.
|
||||
|
||||
The ``__salt__`` variable is a :ref:`Python dictionary <typesmapping>`
|
||||
The ``__salt__`` variable is a :ref:`Python dictionary <python:typesmapping>`
|
||||
containing all of the Salt functions. Dictionary keys are strings representing
|
||||
the names of the modules and the values are the functions themselves.
|
||||
|
||||
|
@ -176,7 +176,7 @@ Grains Data
|
|||
-----------
|
||||
|
||||
The values detected by the Salt Grains on the minion are available in a
|
||||
:ref:`Python dictionary <typesmapping>` named ``__grains__`` and can be
|
||||
:ref:`Python dictionary <python:typesmapping>` named ``__grains__`` and can be
|
||||
accessed from within callable objects in the Python modules.
|
||||
|
||||
To see the contents of the grains dictionary for a given system in your
|
||||
|
@ -288,7 +288,7 @@ Virtual module names are set using the ``__virtual__`` function and the
|
|||
``__virtual__`` Function
|
||||
========================
|
||||
|
||||
The ``__virtual__`` function returns either a :ref:`string <typesseq>`,
|
||||
The ``__virtual__`` function returns either a :ref:`string <python:typesseq>`,
|
||||
:py:data:`True`, :py:data:`False`, or :py:data:`False` with an :ref:`error
|
||||
string <modules-error-info>`. If a string is returned then the module is loaded
|
||||
using the name of the string as the virtual name. If ``True`` is returned the
|
||||
|
|
|
@ -42,6 +42,7 @@ pillar modules
|
|||
reclass_adapter
|
||||
redismod
|
||||
s3
|
||||
saltclass
|
||||
sql_base
|
||||
sqlcipher
|
||||
sqlite3
|
||||
|
@ -52,4 +53,3 @@ pillar modules
|
|||
venafi
|
||||
virtkey
|
||||
vmware_pillar
|
||||
|
||||
|
|
6
doc/ref/pillar/all/salt.pillar.saltclass.rst
Normal file
6
doc/ref/pillar/all/salt.pillar.saltclass.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
=====================
|
||||
salt.pillar.saltclass
|
||||
=====================
|
||||
|
||||
.. automodule:: salt.pillar.saltclass
|
||||
:members:
|
|
@ -13,6 +13,7 @@ state modules
|
|||
acme
|
||||
alias
|
||||
alternatives
|
||||
ansiblegate
|
||||
apache
|
||||
apache_conf
|
||||
apache_module
|
||||
|
|
6
doc/ref/states/all/salt.states.ansiblegate.rst
Normal file
6
doc/ref/states/all/salt.states.ansiblegate.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
=======================
|
||||
salt.states.ansiblegate
|
||||
=======================
|
||||
|
||||
.. automodule:: salt.states.ansiblegate
|
||||
:members:
|
|
@ -797,7 +797,7 @@ mod_python.sls
|
|||
- require_in:
|
||||
- service: httpd
|
||||
|
||||
Now the httpd server will only start if php or mod_python are first verified to
|
||||
Now the httpd server will only start if both php and mod_python are first verified to
|
||||
be installed. Thus allowing for a requisite to be defined "after the fact".
|
||||
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ functions available in other state modules.
|
|||
The variable ``__states__`` is packed into the modules after they are loaded into
|
||||
the Salt minion.
|
||||
|
||||
The ``__states__`` variable is a :ref:`Python dictionary <typesmapping>`
|
||||
The ``__states__`` variable is a :ref:`Python dictionary <python:typesmapping>`
|
||||
containing all of the state modules. Dictionary keys are strings representing
|
||||
the names of the modules and the values are the functions themselves.
|
||||
|
||||
|
|
|
@ -14,4 +14,5 @@ master tops modules
|
|||
ext_nodes
|
||||
mongo
|
||||
reclass_adapter
|
||||
saltclass
|
||||
varstack
|
||||
|
|
6
doc/ref/tops/all/salt.tops.saltclass.rst
Normal file
6
doc/ref/tops/all/salt.tops.saltclass.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
===================
|
||||
salt.tops.saltclass
|
||||
===================
|
||||
|
||||
.. automodule:: salt.tops.saltclass
|
||||
:members:
|
|
@ -114,6 +114,12 @@ Set up the provider cloud config at ``/etc/salt/cloud.providers`` or
|
|||
|
||||
driver: gce
|
||||
|
||||
.. note::
|
||||
|
||||
Empty strings as values for ``service_account_private_key`` and ``service_account_email_address``
|
||||
can be used on GCE instances. This will result in the service account assigned to the GCE instance
|
||||
being used.
|
||||
|
||||
.. note::
|
||||
|
||||
The value provided for ``project`` must not contain underscores or spaces and
|
||||
|
|
|
@ -156,6 +156,10 @@ with their default settings listed.
|
|||
# Network interfaces, netX
|
||||
net0: name=eth0,bridge=vmbr0,ip=dhcp
|
||||
|
||||
# Public key to add to /root/.ssh/authorized_keys.
|
||||
pubkey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABA...'
|
||||
|
||||
|
||||
QEMU
|
||||
====
|
||||
|
||||
|
|
|
@ -526,6 +526,19 @@ GPG key with ``git`` locally, and linking the GPG key to your GitHub account.
|
|||
Once these steps are completed, the commit signing verification will look like
|
||||
the example in GitHub's `GPG Signature Verification feature announcement`_.
|
||||
|
||||
Bootstrap Script Changes
|
||||
------------------------
|
||||
|
||||
Salt's Bootstrap Script, known as `bootstrap-salt.sh`_ in the Salt repo, has its own
|
||||
repository, contributing guidelines, and release cadence.
|
||||
|
||||
All changes to the Bootstrap Script should be made to `salt-bootstrap repo`_. Any
|
||||
pull requests made to the `bootstrap-salt.sh`_ file in the Salt repository will be
|
||||
automatically overwritten upon the next stable release of the Bootstrap Script.
|
||||
|
||||
For more information on the release process or how to contribute to the Bootstrap
|
||||
Script, see the Bootstrap Script's `Contributing Guidelines`_.
|
||||
|
||||
.. _`saltstack/salt`: https://github.com/saltstack/salt
|
||||
.. _`GitHub Fork a Repo Guide`: https://help.github.com/articles/fork-a-repo
|
||||
.. _`GitHub issue tracker`: https://github.com/saltstack/salt/issues
|
||||
|
@ -537,3 +550,6 @@ the example in GitHub's `GPG Signature Verification feature announcement`_.
|
|||
.. _GPG Probot: https://probot.github.io/apps/gpg/
|
||||
.. _help articles: https://help.github.com/articles/signing-commits-with-gpg/
|
||||
.. _GPG Signature Verification feature announcement: https://github.com/blog/2144-gpg-signature-verification
|
||||
.. _bootstrap-salt.sh: https://github.com/saltstack/salt/blob/develop/salt/cloud/deploy/bootstrap-salt.sh
|
||||
.. _salt-bootstrap repo: https://github.com/saltstack/salt-bootstrap
|
||||
.. _Contributing Guidelines: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md
|
||||
|
|
|
@ -32,20 +32,9 @@ possible solution or implementation.
|
|||
|
||||
- ``Blocked`` - The issue is waiting on actions by parties outside of
|
||||
SaltStack, such as receiving more information from the submitter or
|
||||
resolution of an upstream issue. This milestone is usually applied in
|
||||
conjunction with the labels ``Info Needed``, ``Question``, ``Expected
|
||||
Behavior``, ``Won't Fix For Now``, or ``Upstream Bug``.
|
||||
|
||||
- ``Under Review`` - The issue is having further validation done by a SaltStack
|
||||
engineer.
|
||||
|
||||
- ``<Sprint>`` - The issue is being actively worked on by a SaltStack engineer.
|
||||
Sprint milestones names are constructed from the chemical symbol of the next
|
||||
release's codename and the number of sprints until that release is made. For
|
||||
example, if the next release codename is ``Neon`` and there are five sprints
|
||||
until that release, the corresponding sprint milestone will be called ``Ne
|
||||
5``. See :ref:`here <version-numbers>` for a discussion of Salt's release
|
||||
codenames.
|
||||
resolution of an upstream issue. This milestone is usually applied in
|
||||
conjunction with the labels ``Info Needed``, ``Question``,
|
||||
``Expected Behavior``, ``Won't Fix For Now``, or ``Upstream Bug``.
|
||||
|
||||
Labels
|
||||
======
|
||||
|
|
|
@ -32,6 +32,46 @@ or the integration testing factions contain respective integration or unit
|
|||
test files for Salt execution modules.
|
||||
|
||||
|
||||
.. note::
|
||||
Salt's test framework provides for the option to only run tests which
|
||||
correspond to a given file (or set of files), via the ``--from-filenames``
|
||||
argument to ``runtests.py``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python /path/to/runtests.py --from-filenames=salt/modules/foo.py
|
||||
|
||||
Therefore, where possible, test files should be named to match the source
|
||||
files they are testing. For example, when writing tests for
|
||||
``salt/modules/foo.py``, unit tests should go into
|
||||
``tests/unit/modules/test_foo.py``, and integration tests should go into
|
||||
``tests/integration/modules/test_foo.py``.
|
||||
|
||||
However, integration tests are organized differently from unit tests, and
|
||||
this may not always be plausible. In these cases, to ensure that the proper
|
||||
tests are run for these files, they must be mapped in
|
||||
`tests/filename_map.yml`__.
|
||||
|
||||
The filename map is used to supplement the test framework's filename
|
||||
matching logic. This allows one to ensure that states corresponding to an
|
||||
execution module are also tested when ``--from-filenames`` includes that
|
||||
execution module. It can also be used for those cases where the path to a
|
||||
test file doesn't correspond directly to the file which is being tested
|
||||
(e.g. the ``shell``, ``spm``, and ``ssh`` integration tests, among others).
|
||||
Both glob expressions and regular expressions are permitted in the filename
|
||||
map.
|
||||
|
||||
|
||||
.. important::
|
||||
Test modules which don't map directly to the source file they are
|
||||
testing (using the naming convention described above), **must** be
|
||||
added to the ``ignore`` tuple in ``tests/unit/test_module_names.py``,
|
||||
in the ``test_module_name_source_match`` function. This unit test
|
||||
ensures that we maintain the naming convention for test files.
|
||||
|
||||
.. __: https://github.com/saltstack/salt/blob/develop/tests/filename_map.yml
|
||||
|
||||
|
||||
Integration Tests
|
||||
-----------------
|
||||
|
||||
|
@ -445,8 +485,8 @@ successfully. If a network connection is not detected, the test will not run.
|
|||
order for the test to be executed. Otherwise, the test is skipped.
|
||||
|
||||
`@requires_system_grains` -- Loads and passes the grains on the system as an
|
||||
keyword argument to the test function with the name `grains`.
|
||||
|
||||
keyword argument to the test function with the name `grains`.
|
||||
|
||||
`@skip_if_binaries_missing(['list', 'of', 'binaries'])` -- If called from inside a test,
|
||||
the test will be skipped if the binaries are not all present on the system.
|
||||
|
||||
|
|
|
@ -125,8 +125,8 @@ The grains are derived by executing all of the "public" functions (i.e. those
|
|||
which do not begin with an underscore) found in the modules located in the
|
||||
Salt's core grains code, followed by those in any custom grains modules. The
|
||||
functions in a grains module must return a :ref:`Python dictionary
|
||||
<typesmapping>`, where the dictionary keys are the names of grains, and each
|
||||
key's value is that value for that grain.
|
||||
<python:typesmapping>`, where the dictionary keys are the names of grains, and
|
||||
each key's value is that value for that grain.
|
||||
|
||||
Custom grains modules should be placed in a subdirectory named ``_grains``
|
||||
located under the :conf_master:`file_roots` specified by the master config
|
||||
|
|
|
@ -177,7 +177,7 @@ Saltstack extends `builtin filters`_ with these custom filters:
|
|||
|
||||
Converts any time related object into a time based string. It requires valid
|
||||
strftime directives. An exhaustive list can be found :ref:`here
|
||||
<strftime-strptime-behavior>` in the Python documentation.
|
||||
<python:strftime-strptime-behavior>` in the Python documentation.
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
|
|
|
@ -35,8 +35,8 @@ NAPALM
|
|||
NAPALM (Network Automation and Programmability Abstraction Layer with
|
||||
Multivendor support) is an opensourced Python library that implements a set of
|
||||
functions to interact with different router vendor devices using a unified API.
|
||||
Begin vendor-agnostic simplifies the operations, as the configuration
|
||||
and the interaction with the network device does not rely on a particular vendor.
|
||||
Being vendor-agnostic simplifies operations, as the configuration and
|
||||
interaction with the network device does not rely on a particular vendor.
|
||||
|
||||
.. image:: /_static/napalm_logo.png
|
||||
|
||||
|
|
17
doc/topics/releases/2016.11.10.rst
Normal file
17
doc/topics/releases/2016.11.10.rst
Normal file
|
@ -0,0 +1,17 @@
|
|||
=============================
|
||||
Salt 2016.11.10 Release Notes
|
||||
=============================
|
||||
|
||||
Version 2016.11.10 is a security release for :ref:`2016.11.0 <release-2016-11-0>`.
|
||||
|
||||
Changes for v2016.11.9..v2016.11.10
|
||||
-----------------------------------
|
||||
|
||||
Security Fix
|
||||
============
|
||||
|
||||
CVE-2018-15751 Remote command execution and incorrect access control when using salt-api.
|
||||
|
||||
CVE-2018-15750 Directory traversal vulnerability when using salt-api. Allows an attacker to determine what files exist on a server when querying /run or /events.
|
||||
|
||||
Credit and thanks for discovery and responsible disclosure: nullbr4in, xcuter, koredge, loupos, blackcon, Naver Business Platform
|
File diff suppressed because it is too large
Load diff
16
doc/topics/releases/2017.7.9.rst
Normal file
16
doc/topics/releases/2017.7.9.rst
Normal file
|
@ -0,0 +1,16 @@
|
|||
========================================
|
||||
In Progress: Salt 2017.7.9 Release Notes
|
||||
========================================
|
||||
|
||||
Version 2017.7.9 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
|
||||
This release is still in progress and has not been released yet.
|
||||
|
||||
Salt Cloud Features
|
||||
===================
|
||||
|
||||
GCE Driver
|
||||
----------
|
||||
The GCE salt cloud driver can now be used with GCE instance credentials by
|
||||
setting the configuration parameters ``service_account_private_key`` and
|
||||
``service_account_private_email`` to an empty string.
|
||||
|
|
@ -4,6 +4,34 @@
|
|||
Salt 2018.3.0 Release Notes - Codename Oxygen
|
||||
=============================================
|
||||
|
||||
.. warning::
|
||||
If you are using Jinja to dump lists or dictionaries in your SLS files,
|
||||
this will now cause errors in Python 2 since Jinja does not produce
|
||||
YAML-compatible output when strings in the data structures contain unicode
|
||||
types. The dictionary must be passed through a Jinja filter to produce
|
||||
YAML-compatible strings.
|
||||
|
||||
The below is an example of invalid SLS:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict }}
|
||||
|
||||
To make it valid, use either one of Salt's own ``json`` or ``yaml``
|
||||
filters:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict | json }}
|
||||
|
||||
Unicode/Python 3 Compatibility Improvements
|
||||
===========================================
|
||||
|
||||
|
|
|
@ -14,6 +14,34 @@ Statistics
|
|||
- Contributors: **55** (`Ch3LL`_, `DmitryKuzmenko`_, `Giandom`_, `Kimol`_, `L4rS6`_, `LukeCarrier`_, `OrlandoArcapix`_, `TamCore`_, `The-Loeki`_, `UtahDave`_, `aesposito91`_, `bbinet`_, `bdrung`_, `boltronics`_, `bosatsu`_, `clan`_, `corywright`_, `damon-atkins`_, `dincamihai`_, `dmurphy18`_, `dnABic`_, `douglasjreynolds`_, `dwoz`_, `edgan`_, `ejparker12`_, `esell`_, `ezh`_, `femnad`_, `folti`_, `garethgreenaway`_, `gtmanfred`_, `isbm`_, `jasperla`_, `johnj`_, `mateiw`_, `mcalmer`_, `mirceaulinic`_, `morganwillcock`_, `opdude`_, `pcn`_, `pruiz`_, `psagers`_, `psyer`_, `rallytime`_, `robinro`_, `s0undt3ch`_, `samodid`_, `shengis`_, `skjaro`_, `tankywoo`_, `terminalmage`_, `twangboy`_, `vutny`_, `yannj-fr`_, `zmedico`_)
|
||||
|
||||
|
||||
.. warning::
|
||||
If you are using Jinja to dump lists or dictionaries in your SLS files,
|
||||
this will now cause errors in Python 2 since Jinja does not produce
|
||||
YAML-compatible output when strings in the data structures contain unicode
|
||||
types. The dictionary must be passed through a Jinja filter to produce
|
||||
YAML-compatible strings.
|
||||
|
||||
The below is an example of invalid SLS:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict }}
|
||||
|
||||
To make it valid, use either one of Salt's own ``json`` or ``yaml``
|
||||
filters:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict | json }}
|
||||
|
||||
Tornado 5.0 Support for Python 2 Only
|
||||
=====================================
|
||||
|
||||
|
|
|
@ -26,6 +26,33 @@ Statistics
|
|||
|
||||
- Contributors: **4** (`cro`_, `garethgreenaway`_, `gtmanfred`_, `rallytime`_)
|
||||
|
||||
.. warning::
|
||||
If you are using Jinja to dump lists or dictionaries in your SLS files,
|
||||
this will now cause errors in Python 2 since Jinja does not produce
|
||||
YAML-compatible output when strings in the data structures contain unicode
|
||||
types. The dictionary must be passed through a Jinja filter to produce
|
||||
YAML-compatible strings.
|
||||
|
||||
The below is an example of invalid SLS:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict }}
|
||||
|
||||
To make it valid, use either one of Salt's own ``json`` or ``yaml``
|
||||
filters:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict | json }}
|
||||
|
||||
Changelog for v2018.3.1..v2018.3.2
|
||||
==================================
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
Salt 2018.3.3 Release Notes
|
||||
===========================
|
||||
|
||||
Version 2018.3.3 is a bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
|
||||
Version 2018.3.3 is a security and bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
|
||||
|
||||
Statistics
|
||||
==========
|
||||
|
@ -14,6 +14,44 @@ Statistics
|
|||
- Contributors: **55** (`Ch3LL`_, `FedericoCeratto`_, `KaiSforza`_, `L4rS6`_, `Lutseslav`_, `The-Loeki`_, `Vaelatern`_, `admd`_, `aesposito91`_, `asenci`_, `astorath`_, `azelezni`_, `babs`_, `bbczeuz`_, `bbinet`_, `brejoc`_, `cro`_, `daa`_, `dmurphy18`_, `dubb-b`_, `dwoz`_, `eliasp`_, `ezh`_, `garethgreenaway`_, `gtmanfred`_, `isbm`_, `jeduardo`_, `kt97679`_, `kuetrzi`_, `linoplt`_, `lomeroe`_, `lusche`_, `mateiw`_, `max-arnold`_, `maxim-sermin`_, `meaksh`_, `mmulqueen`_, `morganwillcock`_, `mtorromeo`_, `nullify005`_, `paulcollinsiii`_, `pritambaral`_, `rallytime`_, `rares-pop`_, `rmarchei`_, `rosscdh`_, `sizgiyaev`_, `sjorge`_, `t0fik`_, `terminalmage`_, `travispaul`_, `twangboy`_, `vinian`_, `weswhet`_, `zerthimon`_)
|
||||
|
||||
|
||||
.. warning::
|
||||
If you are using Jinja to dump lists or dictionaries in your SLS files,
|
||||
this will now cause errors in Python 2 since Jinja does not produce
|
||||
YAML-compatible output when strings in the data structures contain unicode
|
||||
types. The dictionary must be passed through a Jinja filter to produce
|
||||
YAML-compatible strings.
|
||||
|
||||
The below is an example of invalid SLS:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict }}
|
||||
|
||||
To make it valid, use either one of Salt's own ``json`` or ``yaml``
|
||||
filters. Another option would be to use Jinja's :ref:`tojson
|
||||
<release-2018-3-3-tojson-filter>` filter.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/etc/foo.conf:
|
||||
file.managed:
|
||||
- source: salt://foo.conf
|
||||
- template: jinja
|
||||
- defaults: {{ mydict | tojson }}
|
||||
|
||||
Security Fix
|
||||
============
|
||||
|
||||
CVE-2018-15751 Remote command execution and incorrect access control when using salt-api.
|
||||
|
||||
CVE-2018-15750 Directory traversal vulnerability when using salt-api. Allows an attacker to determine what files exist on a server when querying /run or /events.
|
||||
|
||||
Credit and thanks for discovery and responsible disclosure: nullbr4in, xcuter, koredge, loupos, blackcon, Naver Business Platform
|
||||
|
||||
Changes to win_timezone
|
||||
=======================
|
||||
|
||||
|
@ -24,6 +62,8 @@ Improves timezone detection by using the pytz module.
|
|||
Adds ``timezone.list`` to list supported timezones in either Windows or Unix
|
||||
format.
|
||||
|
||||
.. _release-2018-3-3-tojson-filter:
|
||||
|
||||
New Jinja Filter
|
||||
================
|
||||
|
||||
|
@ -31,15 +71,15 @@ The :jinja_ref:`tojson` filter (from Jinja 2.9 and later) has been ported to
|
|||
Salt, and will be used when this filter is not available. This allows older LTS
|
||||
releases such as CentOS 7 and Ubuntu 14.04 to use this filter.
|
||||
|
||||
You should use this filter any time you wish to dump a list or dictionary into
|
||||
an SLS file, to ensure that the result is able to be loaded by the YAML
|
||||
renderer. For example:
|
||||
You can use this filter any time you wish to dump a list or dictionary into an
|
||||
SLS file, to ensure that the result is able to be loaded by the YAML renderer.
|
||||
For example:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
foo:
|
||||
bar.baz:
|
||||
- some_arg: {{ mydict|tojson }}
|
||||
- some_arg: {{ mydict | tojson }}
|
||||
|
||||
MacOSX escape characters with runas
|
||||
===================================
|
||||
|
@ -54,7 +94,7 @@ Example:
|
|||
cmd.run 'echo '\''h=\"baz\"'\''' runas=macuser
|
||||
|
||||
Changelog for v2018.3.2..v2018.3.3
|
||||
=================================================================
|
||||
==================================
|
||||
|
||||
*Generated at: 2018-09-21 17:45:27 UTC*
|
||||
|
||||
|
@ -469,7 +509,7 @@ Changelog for v2018.3.2..v2018.3.3
|
|||
|
||||
* 3d26affa10 Fix remaining file state integration tests (py3)
|
||||
|
||||
* **PR** `#49171`_: (`Ch3LL`_) [2018.3.3] cherry pick `#49103`_
|
||||
* **PR** `#49171`_: (`Ch3LL`_) [2018.3.3] cherry pick `#49103`_
|
||||
@ *2018-08-17 20:23:32 UTC*
|
||||
|
||||
* **PR** `#49103`_: (`dwoz`_) Install the launcher so we can execute py files (refs: `#49171`_)
|
||||
|
@ -1592,7 +1632,7 @@ Changelog for v2018.3.2..v2018.3.3
|
|||
|
||||
* **ISSUE** `#46896`_: (`Poil`_) Proxy + file.managed => Comment: Failed to cache xxx invalid arguments to setopt (refs: `#48754`_)
|
||||
|
||||
* **PR** `#48754`_: (`lomeroe`_) send proxy/ca_cert parameters as strings (not unicode) to tornado httpclient
|
||||
* **PR** `#48754`_: (`lomeroe`_) send proxy/ca_cert parameters as strings (not unicode) to tornado httpclient
|
||||
@ *2018-07-25 14:55:42 UTC*
|
||||
|
||||
* 030c921914 Merge pull request `#48754`_ from lomeroe/fix-tornado-proxy
|
||||
|
@ -3037,7 +3077,7 @@ Changelog for v2018.3.2..v2018.3.3
|
|||
|
||||
* dae65da256 Merge branch '2018.3.1' into '2018.3'
|
||||
|
||||
* **PR** `#48186`_: (`rallytime`_) Add autodoc module for saltcheck.py
|
||||
* **PR** `#48186`_: (`rallytime`_) Add autodoc module for saltcheck.py
|
||||
@ *2018-06-19 19:03:55 UTC*
|
||||
|
||||
* 5b4897f050 Merge pull request `#48186`_ from rallytime/saltcheck-docs
|
||||
|
@ -3324,11 +3364,11 @@ Changelog for v2018.3.2..v2018.3.3
|
|||
* **PR** `#48109`_: (`rallytime`_) Back-port `#47851`_ to 2018.3
|
||||
@ *2018-06-14 13:09:04 UTC*
|
||||
|
||||
* **PR** `#47851`_: (`rares-pop`_) Fixup! add master.py:FileserverUpdate **kwargs (refs: `#48109`_)
|
||||
* **PR** `#47851`_: (`rares-pop`_) Fixup! add master.py:FileserverUpdate \*\*kwargs (refs: `#48109`_)
|
||||
|
||||
* 2902ee0b14 Merge pull request `#48109`_ from rallytime/bp-47851
|
||||
|
||||
* e9dc30bf8e Fixup! add master.py:FileserverUpdate **kwargs
|
||||
* e9dc30bf8e Fixup! add master.py:FileserverUpdate \*\*kwargs
|
||||
|
||||
* **ISSUE** `#47925`_: (`JonGriggs`_) GitFS looking for files in the master branch only (refs: `#47943`_)
|
||||
|
||||
|
@ -3339,7 +3379,7 @@ Changelog for v2018.3.2..v2018.3.3
|
|||
|
||||
* 534e1a7100 Merge branch '2018.3' into issue47925
|
||||
|
||||
* **PR** `#48089`_: (`rallytime`_) Update release versions for the 2018.3 branch
|
||||
* **PR** `#48089`_: (`rallytime`_) Update release versions for the 2018.3 branch
|
||||
@ *2018-06-13 14:03:44 UTC*
|
||||
|
||||
* 9e1d0040e4 Merge pull request `#48089`_ from rallytime/update_version_doc_2018.3
|
||||
|
|
14
doc/topics/releases/2018.3.4.rst
Normal file
14
doc/topics/releases/2018.3.4.rst
Normal file
|
@ -0,0 +1,14 @@
|
|||
========================================
|
||||
In Progress: Salt 2018.3.4 Release Notes
|
||||
========================================
|
||||
|
||||
Version 2018.3.4 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
|
||||
This release is still in progress and has not been released yet.
|
||||
|
||||
|
||||
State Changes
|
||||
=============
|
||||
|
||||
- The :py:func:`host.present <salt.states.host.present>` state can now remove
|
||||
the specified hostname from IPs not specified in the state. This can be done
|
||||
by setting the newly-added ``clean`` argument to ``True``.
|
|
@ -36,6 +36,8 @@ Assigned codenames:
|
|||
- Nitrogen: ``2017.7.0``
|
||||
- Oxygen: ``2018.3.0``
|
||||
- Fluorine: ``TBD``
|
||||
- Neon: ``TBD``
|
||||
- Sodium: ``TBD``
|
||||
|
||||
Example
|
||||
-------
|
||||
|
|
|
@ -77,11 +77,11 @@ deeply-nested dict can be declared with curly braces:
|
|||
- group: root
|
||||
- mode: 644
|
||||
- template: jinja
|
||||
- context:
|
||||
custom_var: "override"
|
||||
- defaults:
|
||||
custom_var: "default value"
|
||||
other_var: 123
|
||||
- context: {
|
||||
custom_var: "override" }
|
||||
- defaults: {
|
||||
custom_var: "default value",
|
||||
other_var: 123 }
|
||||
|
||||
Here is a more concrete example of how YAML actually handles these
|
||||
indentations, using the Python interpreter on the command line:
|
||||
|
|
|
@ -74,15 +74,15 @@ be overridden with the ``method`` argument:
|
|||
salt.utils.http.query('http://example.com/delete/url', 'DELETE')
|
||||
|
||||
When using the ``POST`` method (and others, such as ``PUT``), extra data is usually
|
||||
sent as well. This data can be sent directly, in whatever format is
|
||||
required by the remote server (XML, JSON, plain text, etc).
|
||||
sent as well. This data can be sent directly (would be URL encoded when necessary),
|
||||
or in whatever format is required by the remote server (XML, JSON, plain text, etc).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
salt.utils.http.query(
|
||||
'http://example.com/delete/url',
|
||||
'http://example.com/post/url',
|
||||
method='POST',
|
||||
data=json.loads(mydict)
|
||||
data=json.dumps(mydict)
|
||||
)
|
||||
|
||||
Data Formatting and Templating
|
||||
|
|
|
@ -4,386 +4,40 @@
|
|||
Salt Bootstrap
|
||||
==============
|
||||
|
||||
The Salt Bootstrap script allows for a user to install the Salt Minion or
|
||||
Master on a variety of system distributions and versions. This shell script
|
||||
known as ``bootstrap-salt.sh`` runs through a series of checks to determine
|
||||
the operating system type and version. It then installs the Salt binaries
|
||||
using the appropriate methods. The Salt Bootstrap script installs the
|
||||
minimum number of packages required to run Salt. This means that in the event
|
||||
you run the bootstrap to install via package, Git will not be installed.
|
||||
Installing the minimum number of packages helps ensure the script stays as
|
||||
lightweight as possible, assuming the user will install any other required
|
||||
packages after the Salt binaries are present on the system. The script source
|
||||
is available on GitHub: https://github.com/saltstack/salt-bootstrap
|
||||
The Salt Bootstrap Script allows a user to install the Salt Minion or Master
|
||||
on a variety of system distributions and versions.
|
||||
|
||||
The Salt Bootstrap Script is a shell script is known as ``bootstrap-salt.sh``.
|
||||
It runs through a series of checks to determine the operating system type and
|
||||
version. It then installs the Salt binaries using the appropriate methods.
|
||||
|
||||
Supported Operating Systems
|
||||
---------------------------
|
||||
The Salt Bootstrap Script installs the minimum number of packages required to
|
||||
run Salt. This means that in the event you run the bootstrap to install via
|
||||
package, Git will not be installed. Installing the minimum number of packages
|
||||
helps ensure the script stays as lightweight as possible, assuming the user
|
||||
will install any other required packages after the Salt binaries are present
|
||||
on the system.
|
||||
|
||||
The Salt Bootstrap Script is maintained in a separate repo from Salt, complete
|
||||
with its own issues, pull requests, contributing guidelines, release protocol,
|
||||
etc.
|
||||
|
||||
To learn more, please see the Salt Bootstrap repo links:
|
||||
|
||||
- `Salt Bootstrap repo`_
|
||||
- `README`_: includes supported operating systems, example usage, and more.
|
||||
- `Contributing Guidelines`_
|
||||
- `Release Process`_
|
||||
|
||||
.. note::
|
||||
|
||||
In the event you do not see your distribution or version available please
|
||||
review the develop branch on GitHub as it may contain updates that are
|
||||
not present in the stable release:
|
||||
https://github.com/saltstack/salt-bootstrap/tree/develop
|
||||
|
||||
|
||||
Debian and derivatives
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Debian GNU/Linux 7/8
|
||||
- Linux Mint Debian Edition 1 (based on Debian 8)
|
||||
- Kali Linux 1.0 (based on Debian 7)
|
||||
|
||||
|
||||
Red Hat family
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
- Amazon Linux 2012.09/2013.03/2013.09/2014.03/2014.09
|
||||
- CentOS 5/6/7
|
||||
- Fedora 17/18/20/21/22
|
||||
- Oracle Linux 5/6/7
|
||||
- Red Hat Enterprise Linux 5/6/7
|
||||
- Scientific Linux 5/6/7
|
||||
|
||||
|
||||
SUSE family
|
||||
~~~~~~~~~~~
|
||||
|
||||
- openSUSE 12/13
|
||||
- openSUSE Leap 42
|
||||
- openSUSE Tumbleweed 2015
|
||||
- SUSE Linux Enterprise Server 11 SP1/11 SP2/11 SP3/12
|
||||
|
||||
|
||||
Ubuntu and derivatives
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Elementary OS 0.2 (based on Ubuntu 12.04)
|
||||
- Linaro 12.04
|
||||
- Linux Mint 13/14/16/17
|
||||
- Trisquel GNU/Linux 6 (based on Ubuntu 12.04)
|
||||
- Ubuntu 10.x/11.x/12.x/13.x/14.x/15.x/16.x
|
||||
|
||||
|
||||
Other Linux distro
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Arch Linux
|
||||
- Gentoo
|
||||
|
||||
|
||||
UNIX systems
|
||||
~~~~~~~~~~~~
|
||||
|
||||
**BSD**:
|
||||
|
||||
- OpenBSD
|
||||
- FreeBSD 9/10/11
|
||||
|
||||
**SunOS**:
|
||||
|
||||
- SmartOS
|
||||
|
||||
|
||||
Example Usage
|
||||
-------------
|
||||
|
||||
If you're looking for the *one-liner* to install Salt, please scroll to the
|
||||
bottom and use the instructions for `Installing via an Insecure One-Liner`_
|
||||
|
||||
.. note::
|
||||
|
||||
In every two-step example, you would be well-served to examine the downloaded file and examine
|
||||
it to ensure that it does what you expect.
|
||||
|
||||
|
||||
The Salt Bootstrap script has a wide variety of options that can be passed as
|
||||
well as several ways of obtaining the bootstrap script itself.
|
||||
|
||||
.. note::
|
||||
|
||||
These examples below show how to bootstrap Salt directly from GitHub or other Git repository.
|
||||
Run the script without any parameters to get latest stable Salt packages for your system from
|
||||
`SaltStack corporate repository`_. See first example in the `Install using wget`_ section.
|
||||
|
||||
.. _`SaltStack corporate repository`: https://repo.saltstack.com/
|
||||
|
||||
|
||||
Install using curl
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Using ``curl`` to install latest development version from GitHub:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh git develop
|
||||
|
||||
If you want to install a specific release version (based on the Git tags):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh git v2015.8.8
|
||||
|
||||
To install a specific branch from a Git fork:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh -g https://github.com/myuser/salt.git git mybranch
|
||||
|
||||
If all you want is to install a ``salt-master`` using latest Git:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh -M -N git develop
|
||||
|
||||
If your host has Internet access only via HTTP proxy:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
PROXY='http://user:password@myproxy.example.com:3128'
|
||||
curl -o bootstrap-salt.sh -L -x "$PROXY" https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh -G -H "$PROXY" git
|
||||
|
||||
|
||||
Install using wget
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Using ``wget`` to install your distribution's stable packages:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget -O bootstrap-salt.sh https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh
|
||||
|
||||
Downloading the script from develop branch:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget -O bootstrap-salt.sh https://bootstrap.saltstack.com/develop
|
||||
sudo sh bootstrap-salt.sh
|
||||
|
||||
Installing a specific version from git using ``wget``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget -O bootstrap-salt.sh https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh -P git v2015.8.8
|
||||
|
||||
.. note::
|
||||
|
||||
On the above example we added `-P` which will allow PIP packages to be installed if required but
|
||||
it's not a necessary flag for Git based bootstraps.
|
||||
|
||||
|
||||
Install using Python
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
If you already have Python installed, ``python 2.6``, then it's as easy as:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python -m urllib "https://bootstrap.saltstack.com" > bootstrap-salt.sh
|
||||
sudo sh bootstrap-salt.sh git develop
|
||||
|
||||
All Python versions should support the following in-line code:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python -c 'import urllib; print urllib.urlopen("https://bootstrap.saltstack.com").read()' > bootstrap-salt.sh
|
||||
sudo sh bootstrap-salt.sh git develop
|
||||
|
||||
|
||||
Install using fetch
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
On a FreeBSD base system you usually don't have either of the above binaries available. You **do**
|
||||
have ``fetch`` available though:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
fetch -o bootstrap-salt.sh https://bootstrap.saltstack.com
|
||||
sudo sh bootstrap-salt.sh
|
||||
|
||||
If you have any SSL issues install ``ca_root_nssp``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pkg install ca_root_nssp
|
||||
|
||||
And either copy the certificates to the place where fetch can find them:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
cp /usr/local/share/certs/ca-root-nss.crt /etc/ssl/cert.pem
|
||||
|
||||
Or link them to the right place:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
ln -s /usr/local/share/certs/ca-root-nss.crt /etc/ssl/cert.pem
|
||||
|
||||
|
||||
Installing via an Insecure One-Liner
|
||||
------------------------------------
|
||||
|
||||
The following examples illustrate how to install Salt via a one-liner.
|
||||
|
||||
.. note::
|
||||
|
||||
Warning! These methods do not involve a verification step and assume that
|
||||
the delivered file is trustworthy.
|
||||
|
||||
|
||||
Any of the example above which use two-lines can be made to run in a single-line
|
||||
configuration with minor modifications.
|
||||
|
||||
For example, using ``curl`` to install your distribution's stable packages:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -L https://bootstrap.saltstack.com | sudo sh
|
||||
|
||||
|
||||
Using ``wget`` to install your distribution's stable packages:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget -O - https://bootstrap.saltstack.com | sudo sh
|
||||
|
||||
|
||||
Installing the latest develop branch of Salt:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -L https://bootstrap.saltstack.com | sudo sh -s -- git develop
|
||||
|
||||
|
||||
Command Line Options
|
||||
--------------------
|
||||
|
||||
Here's a summary of the command line options:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
$ sh bootstrap-salt.sh -h
|
||||
|
||||
Installation types:
|
||||
- stable Install latest stable release. This is the default
|
||||
install type
|
||||
- stable [branch] Install latest version on a branch. Only supported
|
||||
for packages available at repo.saltstack.com
|
||||
- stable [version] Install a specific version. Only supported for
|
||||
packages available at repo.saltstack.com
|
||||
- daily Ubuntu specific: configure SaltStack Daily PPA
|
||||
- testing RHEL-family specific: configure EPEL testing repo
|
||||
- git Install from the head of the develop branch
|
||||
- git [ref] Install from any git ref (such as a branch, tag, or
|
||||
commit)
|
||||
|
||||
Examples:
|
||||
- bootstrap-salt.sh
|
||||
- bootstrap-salt.sh stable
|
||||
- bootstrap-salt.sh stable 2017.7
|
||||
- bootstrap-salt.sh stable 2017.7.2
|
||||
- bootstrap-salt.sh daily
|
||||
- bootstrap-salt.sh testing
|
||||
- bootstrap-salt.sh git
|
||||
- bootstrap-salt.sh git 2017.7
|
||||
- bootstrap-salt.sh git v2017.7.2
|
||||
- bootstrap-salt.sh git 06f249901a2e2f1ed310d58ea3921a129f214358
|
||||
|
||||
Options:
|
||||
-h Display this message
|
||||
-v Display script version
|
||||
-n No colours
|
||||
-D Show debug output
|
||||
-c Temporary configuration directory
|
||||
-g Salt Git repository URL. Default: https://github.com/saltstack/salt.git
|
||||
-w Install packages from downstream package repository rather than
|
||||
upstream, saltstack package repository. This is currently only
|
||||
implemented for SUSE.
|
||||
-k Temporary directory holding the minion keys which will pre-seed
|
||||
the master.
|
||||
-s Sleep time used when waiting for daemons to start, restart and when
|
||||
checking for the services running. Default: 3
|
||||
-L Also install salt-cloud and required python-libcloud package
|
||||
-M Also install salt-master
|
||||
-S Also install salt-syndic
|
||||
-N Do not install salt-minion
|
||||
-X Do not start daemons after installation
|
||||
-d Disables checking if Salt services are enabled to start on system boot.
|
||||
You can also do this by touching /tmp/disable_salt_checks on the target
|
||||
host. Default: ${BS_FALSE}
|
||||
-P Allow pip based installations. On some distributions the required salt
|
||||
packages or its dependencies are not available as a package for that
|
||||
distribution. Using this flag allows the script to use pip as a last
|
||||
resort method. NOTE: This only works for functions which actually
|
||||
implement pip based installations.
|
||||
-U If set, fully upgrade the system prior to bootstrapping Salt
|
||||
-I If set, allow insecure connections while downloading any files. For
|
||||
example, pass '--no-check-certificate' to 'wget' or '--insecure' to
|
||||
'curl'. On Debian and Ubuntu, using this option with -U allows one to obtain
|
||||
GnuPG archive keys insecurely if distro has changed release signatures.
|
||||
-F Allow copied files to overwrite existing (config, init.d, etc)
|
||||
-K If set, keep the temporary files in the temporary directories specified
|
||||
with -c and -k
|
||||
-C Only run the configuration function. Implies -F (forced overwrite).
|
||||
To overwrite Master or Syndic configs, -M or -S, respectively, must
|
||||
also be specified. Salt installation will be omitted, but some of the
|
||||
dependencies could be installed to write configuration with -j or -J.
|
||||
-A Pass the salt-master DNS name or IP. This will be stored under
|
||||
${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf
|
||||
-i Pass the salt-minion id. This will be stored under
|
||||
${BS_SALT_ETC_DIR}/minion_id
|
||||
-p Extra-package to install while installing Salt dependencies. One package
|
||||
per -p flag. You're responsible for providing the proper package name.
|
||||
-H Use the specified HTTP proxy for all download URLs (including https://).
|
||||
For example: http://myproxy.example.com:3128
|
||||
-Z Enable additional package repository for newer ZeroMQ
|
||||
(only available for RHEL/CentOS/Fedora/Ubuntu based distributions)
|
||||
-b Assume that dependencies are already installed and software sources are
|
||||
set up. If git is selected, git tree is still checked out as dependency
|
||||
step.
|
||||
-f Force shallow cloning for git installations.
|
||||
This may result in an "n/a" in the version number.
|
||||
-l Disable ssl checks. When passed, switches "https" calls to "http" where
|
||||
possible.
|
||||
-V Install Salt into virtualenv
|
||||
(only available for Ubuntu based distributions)
|
||||
-a Pip install all Python pkg dependencies for Salt. Requires -V to install
|
||||
all pip pkgs into the virtualenv.
|
||||
(Only available for Ubuntu based distributions)
|
||||
-r Disable all repository configuration performed by this script. This
|
||||
option assumes all necessary repository configuration is already present
|
||||
on the system.
|
||||
-R Specify a custom repository URL. Assumes the custom repository URL
|
||||
points to a repository that mirrors Salt packages located at
|
||||
repo.saltstack.com. The option passed with -R replaces the
|
||||
"repo.saltstack.com". If -R is passed, -r is also set. Currently only
|
||||
works on CentOS/RHEL and Debian based distributions.
|
||||
-J Replace the Master config file with data passed in as a JSON string. If
|
||||
a Master config file is found, a reasonable effort will be made to save
|
||||
the file with a ".bak" extension. If used in conjunction with -C or -F,
|
||||
no ".bak" file will be created as either of those options will force
|
||||
a complete overwrite of the file.
|
||||
-j Replace the Minion config file with data passed in as a JSON string. If
|
||||
a Minion config file is found, a reasonable effort will be made to save
|
||||
the file with a ".bak" extension. If used in conjunction with -C or -F,
|
||||
no ".bak" file will be created as either of those options will force
|
||||
a complete overwrite of the file.
|
||||
-q Quiet salt installation from git (setup.py install -q)
|
||||
-x Changes the python version used to install a git version of salt. Currently
|
||||
this is considered experimental and has only been tested on Centos 6. This
|
||||
only works for git installations.
|
||||
-y Installs a different python version on host. Currently this has only been
|
||||
tested with Centos 6 and is considered experimental. This will install the
|
||||
ius repo on the box if disable repo is false. This must be used in conjunction
|
||||
with -x <pythonversion>. For example:
|
||||
sh bootstrap.sh -P -y -x python2.7 git v2016.11.3
|
||||
The above will install python27 and install the git version of salt using the
|
||||
python2.7 executable. This only works for git and pip installations.
|
||||
The Salt Bootstrap script can be found in the Salt repo under the
|
||||
``salt/cloud/deploy/bootstrap-salt.sh`` path. Any changes to this file
|
||||
will be overwritten! Bug fixes and feature additions must be submitted
|
||||
via the `Salt Bootstrap repo`_. Please see the Salt Bootstrap Script's
|
||||
`Release Process`_ for more information.
|
||||
|
||||
.. _Salt Bootstrap repo: https://github.com/saltstack/salt-bootstrap
|
||||
.. _README: https://github.com/saltstack/salt-bootstrap#bootstrapping-salt
|
||||
.. _Contributing Guidelines: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md
|
||||
.. _Release Process: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md#release-information
|
||||
|
|
|
@ -113,8 +113,14 @@ if not %errorLevel%==0 (
|
|||
:: Remove build and dist directories
|
||||
@echo %0 :: Remove build and dist directories...
|
||||
@echo ---------------------------------------------------------------------
|
||||
rd /s /q "%SrcDir%\build"
|
||||
rd /s /q "%SrcDir%\dist"
|
||||
"%PyDir%\python.exe" "%SrcDir%\setup.py" clean --all
|
||||
if not %errorLevel%==0 (
|
||||
goto eof
|
||||
)
|
||||
If Exist "%SrcDir%\dist" (
|
||||
@echo removing %SrcDir%\dist
|
||||
rd /S /Q "%SrcDir%\dist"
|
||||
)
|
||||
@echo.
|
||||
|
||||
:: Install Current Version of salt
|
||||
|
|
|
@ -267,14 +267,12 @@ Write-Output " - $script_name :: Copying DLLs . . ."
|
|||
Write-Output " ----------------------------------------------------------------"
|
||||
# Architecture Specific DLL's
|
||||
ForEach($key in $ini[$bitDLLs].Keys) {
|
||||
If ($arrInstalled -notcontains $key) {
|
||||
Write-Output " - $key . . ."
|
||||
$file = "$($ini[$bitDLLs][$key])"
|
||||
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
|
||||
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
|
||||
DownloadFileWithProgress $url $file
|
||||
Copy-Item $file -destination $($ini['Settings']['Python2Dir'])
|
||||
}
|
||||
Write-Output " - $key . . ."
|
||||
$file = "$($ini[$bitDLLs][$key])"
|
||||
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
|
||||
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
|
||||
DownloadFileWithProgress $url $file
|
||||
Copy-Item $file -destination $($ini['Settings']['Python2Dir'])
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
|
|
@ -276,14 +276,12 @@ Write-Output " - $script_name :: Copying DLLs . . ."
|
|||
Write-Output " ----------------------------------------------------------------"
|
||||
# Architecture Specific DLL's
|
||||
ForEach($key in $ini[$bitDLLs].Keys) {
|
||||
If ($arrInstalled -notcontains $key) {
|
||||
Write-Output " - $key . . ."
|
||||
$file = "$($ini[$bitDLLs][$key])"
|
||||
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
|
||||
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
|
||||
DownloadFileWithProgress $url $file
|
||||
Copy-Item $file -destination $($ini['Settings']['Python3Dir'])
|
||||
}
|
||||
Write-Output " - $key . . ."
|
||||
$file = "$($ini[$bitDLLs][$key])"
|
||||
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
|
||||
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
|
||||
DownloadFileWithProgress $url $file
|
||||
Copy-Item $file -destination $($ini['Settings']['Python3Dir'])
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
|
|
@ -4,7 +4,7 @@ mock>=2.0.0
|
|||
SaltPyLint>=v2017.3.6
|
||||
pytest>=3.5.0
|
||||
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
|
||||
testinfra>=1.7.0
|
||||
testinfra>=1.7.0,!=1.17.0
|
||||
|
||||
# httpretty Needs to be here for now even though it's a dependency of boto.
|
||||
# A pip install on a fresh system will decide to target httpretty 0.8.10 to
|
||||
|
|
293
salt/_compat.py
293
salt/_compat.py
|
@ -2,18 +2,21 @@
|
|||
'''
|
||||
Salt compatibility code
|
||||
'''
|
||||
# pylint: disable=import-error,unused-import,invalid-name
|
||||
# pylint: disable=import-error,unused-import,invalid-name,W0231,W0233
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
from __future__ import absolute_import, unicode_literals, print_function
|
||||
import sys
|
||||
import types
|
||||
import logging
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext.six import binary_type, string_types, text_type
|
||||
from salt.exceptions import SaltException
|
||||
from salt.ext.six import binary_type, string_types, text_type, integer_types
|
||||
from salt.ext.six.moves import cStringIO, StringIO
|
||||
|
||||
HAS_XML = True
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
# Python >2.5
|
||||
import xml.etree.cElementTree as ElementTree
|
||||
|
@ -31,11 +34,10 @@ except Exception:
|
|||
import elementtree.ElementTree as ElementTree
|
||||
except Exception:
|
||||
ElementTree = None
|
||||
HAS_XML = False
|
||||
|
||||
|
||||
# True if we are running on Python 3.
|
||||
PY3 = sys.version_info[0] == 3
|
||||
PY3 = sys.version_info.major == 3
|
||||
|
||||
|
||||
if PY3:
|
||||
|
@ -45,13 +47,12 @@ else:
|
|||
import exceptions
|
||||
|
||||
|
||||
if HAS_XML:
|
||||
if ElementTree is not None:
|
||||
if not hasattr(ElementTree, 'ParseError'):
|
||||
class ParseError(Exception):
|
||||
'''
|
||||
older versions of ElementTree do not have ParseError
|
||||
'''
|
||||
pass
|
||||
|
||||
ElementTree.ParseError = ParseError
|
||||
|
||||
|
@ -61,9 +62,7 @@ def text_(s, encoding='latin-1', errors='strict'):
|
|||
If ``s`` is an instance of ``binary_type``, return
|
||||
``s.decode(encoding, errors)``, otherwise return ``s``
|
||||
'''
|
||||
if isinstance(s, binary_type):
|
||||
return s.decode(encoding, errors)
|
||||
return s
|
||||
return s.decode(encoding, errors) if isinstance(s, binary_type) else s
|
||||
|
||||
|
||||
def bytes_(s, encoding='latin-1', errors='strict'):
|
||||
|
@ -71,57 +70,37 @@ def bytes_(s, encoding='latin-1', errors='strict'):
|
|||
If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode(encoding, errors)``, otherwise return ``s``
|
||||
'''
|
||||
return s.encode(encoding, errors) if isinstance(s, text_type) else s
|
||||
|
||||
|
||||
def ascii_native_(s):
|
||||
'''
|
||||
Python 3: If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
|
||||
|
||||
Python 2: If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode('ascii')``, otherwise return ``str(s)``
|
||||
'''
|
||||
if isinstance(s, text_type):
|
||||
return s.encode(encoding, errors)
|
||||
return s
|
||||
s = s.encode('ascii')
|
||||
|
||||
return str(s, 'ascii', 'strict') if PY3 else s
|
||||
|
||||
|
||||
if PY3:
|
||||
def ascii_native_(s):
|
||||
if isinstance(s, text_type):
|
||||
s = s.encode('ascii')
|
||||
return str(s, 'ascii', 'strict')
|
||||
else:
|
||||
def ascii_native_(s):
|
||||
if isinstance(s, text_type):
|
||||
s = s.encode('ascii')
|
||||
return str(s)
|
||||
def native_(s, encoding='latin-1', errors='strict'):
|
||||
'''
|
||||
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
|
||||
return ``str(s, encoding, errors)``
|
||||
|
||||
ascii_native_.__doc__ = '''
|
||||
Python 3: If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
|
||||
Python 2: If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode(encoding, errors)``, otherwise return ``str(s)``
|
||||
'''
|
||||
if PY3:
|
||||
out = s if isinstance(s, text_type) else str(s, encoding, errors)
|
||||
else:
|
||||
out = s.encode(encoding, errors) if isinstance(s, text_type) else str(s)
|
||||
|
||||
Python 2: If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode('ascii')``, otherwise return ``str(s)``
|
||||
'''
|
||||
|
||||
|
||||
if PY3:
|
||||
def native_(s, encoding='latin-1', errors='strict'):
|
||||
'''
|
||||
If ``s`` is an instance of ``text_type``, return
|
||||
``s``, otherwise return ``str(s, encoding, errors)``
|
||||
'''
|
||||
if isinstance(s, text_type):
|
||||
return s
|
||||
return str(s, encoding, errors)
|
||||
else:
|
||||
def native_(s, encoding='latin-1', errors='strict'):
|
||||
'''
|
||||
If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode(encoding, errors)``, otherwise return ``str(s)``
|
||||
'''
|
||||
if isinstance(s, text_type):
|
||||
return s.encode(encoding, errors)
|
||||
return str(s)
|
||||
|
||||
native_.__doc__ = '''
|
||||
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
|
||||
return ``str(s, encoding, errors)``
|
||||
|
||||
Python 2: If ``s`` is an instance of ``text_type``, return
|
||||
``s.encode(encoding, errors)``, otherwise return ``str(s)``
|
||||
'''
|
||||
return out
|
||||
|
||||
|
||||
def string_io(data=None): # cStringIO can't handle unicode
|
||||
|
@ -133,7 +112,199 @@ def string_io(data=None): # cStringIO can't handle unicode
|
|||
except (UnicodeEncodeError, TypeError):
|
||||
return StringIO(data)
|
||||
|
||||
if PY3:
|
||||
import ipaddress
|
||||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
|
||||
try:
|
||||
if PY3:
|
||||
import ipaddress
|
||||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
except ImportError:
|
||||
ipaddress = None
|
||||
|
||||
|
||||
class IPv6AddressScoped(ipaddress.IPv6Address):
|
||||
'''
|
||||
Represent and manipulate single IPv6 Addresses.
|
||||
Scope-aware version
|
||||
'''
|
||||
def __init__(self, address):
|
||||
'''
|
||||
Instantiate a new IPv6 address object. Scope is moved to an attribute 'scope'.
|
||||
|
||||
Args:
|
||||
address: A string or integer representing the IP
|
||||
|
||||
Additionally, an integer can be passed, so
|
||||
IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560)
|
||||
or, more generally
|
||||
IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::')
|
||||
|
||||
Raises:
|
||||
AddressValueError: If address isn't a valid IPv6 address.
|
||||
|
||||
:param address:
|
||||
'''
|
||||
# pylint: disable-all
|
||||
if not hasattr(self, '_is_packed_binary'):
|
||||
# This method (below) won't be around for some Python 3 versions
|
||||
# and we need check this differently anyway
|
||||
self._is_packed_binary = lambda p: isinstance(p, bytes)
|
||||
# pylint: enable-all
|
||||
|
||||
if isinstance(address, string_types) and '%' in address:
|
||||
buff = address.split('%')
|
||||
if len(buff) != 2:
|
||||
raise SaltException('Invalid IPv6 address: "{}"'.format(address))
|
||||
address, self.__scope = buff
|
||||
else:
|
||||
self.__scope = None
|
||||
|
||||
if sys.version_info.major == 2:
|
||||
ipaddress._BaseAddress.__init__(self, address)
|
||||
ipaddress._BaseV6.__init__(self, address)
|
||||
else:
|
||||
# Python 3.4 fix. Versions higher are simply not affected
|
||||
# https://github.com/python/cpython/blob/3.4/Lib/ipaddress.py#L543-L544
|
||||
self._version = 6
|
||||
self._max_prefixlen = ipaddress.IPV6LENGTH
|
||||
|
||||
# Efficient constructor from integer.
|
||||
if isinstance(address, integer_types):
|
||||
self._check_int_address(address)
|
||||
self._ip = address
|
||||
elif self._is_packed_binary(address):
|
||||
self._check_packed_address(address, 16)
|
||||
self._ip = ipaddress._int_from_bytes(address, 'big')
|
||||
else:
|
||||
address = str(address)
|
||||
if '/' in address:
|
||||
raise ipaddress.AddressValueError("Unexpected '/' in {}".format(address))
|
||||
self._ip = self._ip_int_from_string(address)
|
||||
|
||||
def _is_packed_binary(self, data):
|
||||
'''
|
||||
Check if data is hexadecimal packed
|
||||
|
||||
:param data:
|
||||
:return:
|
||||
'''
|
||||
packed = False
|
||||
if len(data) == 16 and ':' not in data:
|
||||
try:
|
||||
packed = bool(int(str(bytearray(data)).encode('hex'), 16))
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return packed
|
||||
|
||||
@property
|
||||
def scope(self):
|
||||
'''
|
||||
Return scope of IPv6 address.
|
||||
|
||||
:return:
|
||||
'''
|
||||
return self.__scope
|
||||
|
||||
def __str__(self):
|
||||
return text_type(self._string_from_ip_int(self._ip) +
|
||||
('%' + self.scope if self.scope is not None else ''))
|
||||
|
||||
|
||||
class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
|
||||
'''
|
||||
Update
|
||||
'''
|
||||
def __init__(self, address):
|
||||
if isinstance(address, (bytes, int)):
|
||||
IPv6AddressScoped.__init__(self, address)
|
||||
self.network = ipaddress.IPv6Network(self._ip)
|
||||
self._prefixlen = self._max_prefixlen
|
||||
return
|
||||
|
||||
addr = ipaddress._split_optional_netmask(address)
|
||||
IPv6AddressScoped.__init__(self, addr[0])
|
||||
self.network = ipaddress.IPv6Network(address, strict=False)
|
||||
self.netmask = self.network.netmask
|
||||
self._prefixlen = self.network._prefixlen
|
||||
self.hostmask = self.network.hostmask
|
||||
|
||||
|
||||
def ip_address(address):
|
||||
"""Take an IP string/int and return an object of the correct type.
|
||||
|
||||
Args:
|
||||
address: A string or integer, the IP address. Either IPv4 or
|
||||
IPv6 addresses may be supplied; integers less than 2**32 will
|
||||
be considered to be IPv4 by default.
|
||||
|
||||
Returns:
|
||||
An IPv4Address or IPv6Address object.
|
||||
|
||||
Raises:
|
||||
ValueError: if the *address* passed isn't either a v4 or a v6
|
||||
address
|
||||
|
||||
"""
|
||||
try:
|
||||
return ipaddress.IPv4Address(address)
|
||||
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
|
||||
log.debug('Error while parsing IPv4 address: %s', address)
|
||||
log.debug(err)
|
||||
|
||||
try:
|
||||
return IPv6AddressScoped(address)
|
||||
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
|
||||
log.debug('Error while parsing IPv6 address: %s', address)
|
||||
log.debug(err)
|
||||
|
||||
if isinstance(address, bytes):
|
||||
raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. '
|
||||
'Did you pass in a bytes (str in Python 2) instead '
|
||||
'of a unicode object?'.format(repr(address)))
|
||||
|
||||
raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address)))
|
||||
|
||||
|
||||
def ip_interface(address):
|
||||
"""Take an IP string/int and return an object of the correct type.
|
||||
|
||||
Args:
|
||||
address: A string or integer, the IP address. Either IPv4 or
|
||||
IPv6 addresses may be supplied; integers less than 2**32 will
|
||||
be considered to be IPv4 by default.
|
||||
|
||||
Returns:
|
||||
An IPv4Interface or IPv6Interface object.
|
||||
|
||||
Raises:
|
||||
ValueError: if the string passed isn't either a v4 or a v6
|
||||
address.
|
||||
|
||||
Notes:
|
||||
The IPv?Interface classes describe an Address on a particular
|
||||
Network, so they're basically a combination of both the Address
|
||||
and Network classes.
|
||||
|
||||
"""
|
||||
try:
|
||||
return ipaddress.IPv4Interface(address)
|
||||
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
|
||||
log.debug('Error while getting IPv4 interface for address %s', address)
|
||||
log.debug(err)
|
||||
|
||||
try:
|
||||
return ipaddress.IPv6Interface(address)
|
||||
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
|
||||
log.debug('Error while getting IPv6 interface for address %s', address)
|
||||
log.debug(err)
|
||||
|
||||
raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address))
|
||||
|
||||
|
||||
if ipaddress:
|
||||
ipaddress.IPv6Address = IPv6AddressScoped
|
||||
if sys.version_info.major == 2:
|
||||
ipaddress.IPv6Interface = IPv6InterfaceScoped
|
||||
ipaddress.ip_address = ip_address
|
||||
ipaddress.ip_interface = ip_interface
|
||||
|
|
|
@ -283,12 +283,14 @@ def auth(username, password):
|
|||
log.error('LDAP authentication requires python-ldap module')
|
||||
return False
|
||||
|
||||
bind = None
|
||||
|
||||
# If bind credentials are configured, verify that we receive a valid bind
|
||||
if _config('binddn', mandatory=False) and _config('bindpw', mandatory=False):
|
||||
bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False))
|
||||
search_bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False))
|
||||
|
||||
# If username & password are not None, attempt to verify they are valid
|
||||
if bind and username and password:
|
||||
if search_bind and username and password:
|
||||
bind = _bind(username, password,
|
||||
anonymous=_config('auth_by_group_membership_only', mandatory=False)
|
||||
and _config('anonymous', mandatory=False))
|
||||
|
|
|
@ -428,3 +428,9 @@ class Beacon(object):
|
|||
tag='/salt/minion/minion_beacon_disabled_complete')
|
||||
|
||||
return True
|
||||
|
||||
def reset(self):
|
||||
'''
|
||||
Reset the beacons to defaults
|
||||
'''
|
||||
self.opts['beacons'] = {}
|
||||
|
|
|
@ -12,6 +12,8 @@ from __future__ import absolute_import, unicode_literals
|
|||
import logging
|
||||
import re
|
||||
|
||||
import salt.utils.platform
|
||||
|
||||
# Import Third Party Libs
|
||||
try:
|
||||
import psutil
|
||||
|
@ -94,19 +96,21 @@ def beacon(config):
|
|||
if not mount.endswith('$'):
|
||||
mount_re = '{0}$'.format(mount)
|
||||
|
||||
if salt.utils.platform.is_windows():
|
||||
mount_re = re.sub('\\$', '\\\\', mount_re)
|
||||
|
||||
for part in parts:
|
||||
if re.match(mount_re, part.mountpoint):
|
||||
_mount = part.mountpoint
|
||||
|
||||
try:
|
||||
_current_usage = psutil.disk_usage(mount)
|
||||
_current_usage = psutil.disk_usage(_mount)
|
||||
except OSError:
|
||||
log.warning('%s is not a valid mount point.', mount)
|
||||
log.warning('%s is not a valid mount point.', _mount)
|
||||
continue
|
||||
|
||||
current_usage = _current_usage.percent
|
||||
monitor_usage = mounts[mount]
|
||||
log.debug('current_usage %s', current_usage)
|
||||
if '%' in monitor_usage:
|
||||
monitor_usage = re.sub('%', '', monitor_usage)
|
||||
monitor_usage = float(monitor_usage)
|
||||
|
|
|
@ -11,7 +11,7 @@ Watch files and translate the changes into salt events
|
|||
the beacon configuration.
|
||||
|
||||
:note: The `inotify` beacon only works on OSes that have `inotify`
|
||||
kernel support. Currently this excludes FreeBSD, macOS, and Windows.
|
||||
kernel support.
|
||||
|
||||
'''
|
||||
# Import Python libs
|
||||
|
|
|
@ -41,6 +41,16 @@ def validate(config):
|
|||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'emitatstartup' in _config:
|
||||
if not isinstance(_config['emitatstartup'], bool):
|
||||
return False, ('Configuration for load beacon option '
|
||||
'emitatstartup must be a boolean.')
|
||||
|
||||
if 'onchangeonly' in _config:
|
||||
if not isinstance(_config['onchangeonly'], bool):
|
||||
return False, ('Configuration for load beacon option '
|
||||
'onchangeonly must be a boolean.')
|
||||
|
||||
if 'averages' not in _config:
|
||||
return False, ('Averages configuration is required'
|
||||
' for load beacon.')
|
||||
|
@ -61,6 +71,7 @@ def validate(config):
|
|||
return False, ('Configuration for load beacon: '
|
||||
'1m, 5m and 15m items must be '
|
||||
'a list of two items.')
|
||||
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
|
@ -118,7 +129,7 @@ def beacon(config):
|
|||
if not LAST_STATUS:
|
||||
for k in ['1m', '5m', '15m']:
|
||||
LAST_STATUS[k] = avg_dict[k]
|
||||
if not config['emitatstartup']:
|
||||
if not _config['emitatstartup']:
|
||||
log.debug("Don't emit because emitatstartup is False")
|
||||
return ret
|
||||
|
||||
|
|
21
salt/cache/etcd_cache.py
vendored
21
salt/cache/etcd_cache.py
vendored
|
@ -50,6 +50,7 @@ value to ``etcd``:
|
|||
'''
|
||||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
import logging
|
||||
import base64
|
||||
try:
|
||||
import etcd
|
||||
HAS_ETCD = True
|
||||
|
@ -112,7 +113,7 @@ def _init_client():
|
|||
log.info("etcd: Setting up client with params: %r", etcd_kwargs)
|
||||
client = etcd.Client(**etcd_kwargs)
|
||||
try:
|
||||
client.get(path_prefix)
|
||||
client.read(path_prefix)
|
||||
except etcd.EtcdKeyNotFound:
|
||||
log.info("etcd: Creating dir %r", path_prefix)
|
||||
client.write(path_prefix, None, dir=True)
|
||||
|
@ -126,7 +127,7 @@ def store(bank, key, data):
|
|||
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
|
||||
try:
|
||||
value = __context__['serial'].dumps(data)
|
||||
client.set(etcd_key, value)
|
||||
client.write(etcd_key, base64.b64encode(value))
|
||||
except Exception as exc:
|
||||
raise SaltCacheError(
|
||||
'There was an error writing the key, {0}: {1}'.format(etcd_key, exc)
|
||||
|
@ -140,10 +141,10 @@ def fetch(bank, key):
|
|||
_init_client()
|
||||
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
|
||||
try:
|
||||
value = client.get(etcd_key).value
|
||||
if value is None:
|
||||
return {}
|
||||
return __context__['serial'].loads(value)
|
||||
value = client.read(etcd_key).value
|
||||
return __context__['serial'].loads(base64.b64decode(value))
|
||||
except etcd.EtcdKeyNotFound:
|
||||
return {}
|
||||
except Exception as exc:
|
||||
raise SaltCacheError(
|
||||
'There was an error reading the key, {0}: {1}'.format(
|
||||
|
@ -162,7 +163,7 @@ def flush(bank, key=None):
|
|||
else:
|
||||
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
|
||||
try:
|
||||
client.get(etcd_key)
|
||||
client.read(etcd_key)
|
||||
except etcd.EtcdKeyNotFound:
|
||||
return # nothing to flush
|
||||
try:
|
||||
|
@ -184,7 +185,7 @@ def _walk(r):
|
|||
return [r.key.split('/', 3)[3]]
|
||||
|
||||
keys = []
|
||||
for c in client.get(r.key).children:
|
||||
for c in client.read(r.key).children:
|
||||
keys.extend(_walk(c))
|
||||
return keys
|
||||
|
||||
|
@ -197,7 +198,7 @@ def ls(bank):
|
|||
_init_client()
|
||||
path = '{0}/{1}'.format(path_prefix, bank)
|
||||
try:
|
||||
return _walk(client.get(path))
|
||||
return _walk(client.read(path))
|
||||
except Exception as exc:
|
||||
raise SaltCacheError(
|
||||
'There was an error getting the key "{0}": {1}'.format(
|
||||
|
@ -213,7 +214,7 @@ def contains(bank, key):
|
|||
_init_client()
|
||||
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
|
||||
try:
|
||||
r = client.get(etcd_key)
|
||||
r = client.read(etcd_key)
|
||||
# return True for keys, not dirs
|
||||
return r.dir is False
|
||||
except etcd.EtcdKeyNotFound:
|
||||
|
|
3
salt/cache/redis_cache.py
vendored
3
salt/cache/redis_cache.py
vendored
|
@ -231,8 +231,7 @@ def _get_redis_server(opts=None):
|
|||
|
||||
if opts['cluster_mode']:
|
||||
REDIS_SERVER = StrictRedisCluster(startup_nodes=opts['startup_nodes'],
|
||||
skip_full_coverage_check=opts['skip_full_coverage_check'],
|
||||
decode_responses=True)
|
||||
skip_full_coverage_check=opts['skip_full_coverage_check'])
|
||||
else:
|
||||
REDIS_SERVER = redis.StrictRedis(opts['host'],
|
||||
opts['port'],
|
||||
|
|
|
@ -1182,7 +1182,7 @@ class LocalClient(object):
|
|||
# stop the iteration, since the jid is invalid
|
||||
raise StopIteration()
|
||||
except Exception as exc:
|
||||
log.warning('Returner unavailable: %s', exc)
|
||||
log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)
|
||||
# Wait for the hosts to check in
|
||||
last_time = False
|
||||
# iterator for this job's return
|
||||
|
@ -1313,6 +1313,13 @@ class LocalClient(object):
|
|||
if raw['data']['return'] == {}:
|
||||
continue
|
||||
|
||||
# if the minion throws an exception containing the word "return"
|
||||
# the master will try to handle the string as a dict in the next
|
||||
# step. Check if we have a string, log the issue and continue.
|
||||
if isinstance(raw['data']['return'], six.string_types):
|
||||
log.error("unexpected return from minion: %s", raw)
|
||||
continue
|
||||
|
||||
if 'return' in raw['data']['return'] and \
|
||||
raw['data']['return']['return'] == {}:
|
||||
continue
|
||||
|
|
|
@ -445,7 +445,7 @@ class SSH(object):
|
|||
if target.get('passwd', False) or self.opts['ssh_passwd']:
|
||||
self._key_deploy_run(host, target, False)
|
||||
return ret
|
||||
if (ret[host].get('stderr') or '').count('Permission denied'):
|
||||
if ret[host].get('stderr', '').count('Permission denied'):
|
||||
target = self.targets[host]
|
||||
# permission denied, attempt to auto deploy ssh key
|
||||
print(('Permission denied for host {0}, do you want to deploy '
|
||||
|
@ -702,7 +702,7 @@ class SSH(object):
|
|||
'''
|
||||
Execute the overall routine, print results via outputters
|
||||
'''
|
||||
if self.opts['list_hosts']:
|
||||
if self.opts.get('list_hosts'):
|
||||
self._get_roster()
|
||||
ret = {}
|
||||
for roster_file in self.__parsed_rosters:
|
||||
|
@ -1052,10 +1052,10 @@ class Single(object):
|
|||
opts_pkg['module_dirs'] = self.opts['module_dirs']
|
||||
opts_pkg['_ssh_version'] = self.opts['_ssh_version']
|
||||
opts_pkg['__master_opts__'] = self.context['master_opts']
|
||||
if '_caller_cachedir' in self.opts:
|
||||
opts_pkg['_caller_cachedir'] = self.opts['_caller_cachedir']
|
||||
if 'known_hosts_file' in self.opts:
|
||||
opts_pkg['known_hosts_file'] = self.opts['known_hosts_file']
|
||||
if '_caller_cachedir' in self.opts:
|
||||
opts_pkg['_caller_cachedir'] = self.opts['_caller_cachedir']
|
||||
else:
|
||||
opts_pkg['_caller_cachedir'] = self.opts['cachedir']
|
||||
# Use the ID defined in the roster file
|
||||
|
@ -1347,12 +1347,20 @@ ARGS = {10}\n'''.format(self.minion_config,
|
|||
if not self.tty:
|
||||
# If RSTR is not seen in both stdout and stderr then there
|
||||
# was a thin deployment problem.
|
||||
log.error('ERROR: Failure deploying thin, retrying: %s\n%s', stdout, stderr)
|
||||
log.error(
|
||||
'ERROR: Failure deploying thin, retrying:\n'
|
||||
'STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s',
|
||||
stdout, stderr, retcode
|
||||
)
|
||||
return self.cmd_block()
|
||||
elif not re.search(RSTR_RE, stdout):
|
||||
# If RSTR is not seen in stdout with tty, then there
|
||||
# was a thin deployment problem.
|
||||
log.error('ERROR: Failure deploying thin, retrying: %s\n%s', stdout, stderr)
|
||||
log.error(
|
||||
'ERROR: Failure deploying thin, retrying:\n'
|
||||
'STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s',
|
||||
stdout, stderr, retcode
|
||||
)
|
||||
while re.search(RSTR_RE, stdout):
|
||||
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
|
||||
if self.tty:
|
||||
|
|
|
@ -247,7 +247,7 @@ def filter_by(lookup_dict,
|
|||
each case to be collected in the base and overridden by the grain
|
||||
selection dictionary and the merge dictionary. Default is None.
|
||||
|
||||
.. versionadded:: 2015.8.11, 2016.3.2
|
||||
.. versionadded:: 2015.8.11,2016.3.2
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
|
|
@ -50,6 +50,7 @@ from salt.exceptions import (
|
|||
SaltCloudExecutionFailure,
|
||||
SaltCloudExecutionTimeout
|
||||
)
|
||||
from salt.utils.stringutils import to_bytes
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
@ -770,7 +771,7 @@ def _compute_signature(parameters, access_key_secret):
|
|||
# All aliyun API only support GET method
|
||||
stringToSign = 'GET&%2F&' + percent_encode(canonicalizedQueryString[1:])
|
||||
|
||||
h = hmac.new(access_key_secret + "&", stringToSign, sha1)
|
||||
h = hmac.new(to_bytes(access_key_secret + "&"), stringToSign, sha1)
|
||||
signature = base64.encodestring(h.digest()).strip()
|
||||
return signature
|
||||
|
||||
|
|
|
@ -60,6 +60,7 @@ import logging
|
|||
import pprint
|
||||
import base64
|
||||
import collections
|
||||
import pkgutil
|
||||
import salt.cache
|
||||
import salt.config as config
|
||||
import salt.utils.cloud
|
||||
|
@ -67,7 +68,6 @@ import salt.utils.data
|
|||
import salt.utils.files
|
||||
import salt.utils.stringutils
|
||||
import salt.utils.yaml
|
||||
from salt.utils.versions import LooseVersion
|
||||
from salt.ext import six
|
||||
import salt.version
|
||||
from salt.exceptions import (
|
||||
|
@ -117,9 +117,12 @@ try:
|
|||
from azure.mgmt.storage import StorageManagementClient
|
||||
from azure.mgmt.web import WebSiteManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
|
||||
from azure.cli import core
|
||||
HAS_LIBS = LooseVersion(core.__version__) >= LooseVersion("2.0.12")
|
||||
if pkgutil.find_loader('azure.multiapi'):
|
||||
# use multiapi version if available
|
||||
from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
|
||||
else:
|
||||
from azure.storage import CloudStorageAccount
|
||||
HAS_LIBS = True
|
||||
except ImportError:
|
||||
pass
|
||||
# pylint: enable=wrong-import-position,wrong-import-order
|
||||
|
@ -152,8 +155,7 @@ def __virtual__():
|
|||
False,
|
||||
'The following dependencies are required to use the AzureARM driver: '
|
||||
'Microsoft Azure SDK for Python >= 2.0rc5, '
|
||||
'Microsoft Azure Storage SDK for Python >= 0.32, '
|
||||
'Microsoft Azure CLI >= 2.0.12'
|
||||
'Microsoft Azure Storage SDK for Python >= 0.32'
|
||||
)
|
||||
|
||||
global cache # pylint: disable=global-statement,invalid-name
|
||||
|
@ -926,7 +928,7 @@ def create_interface(call=None, kwargs=None): # pylint: disable=unused-argument
|
|||
)
|
||||
if pub_ip_data.ip_address: # pylint: disable=no-member
|
||||
ip_kwargs['public_ip_address'] = PublicIPAddress(
|
||||
six.text_type(pub_ip_data.id), # pylint: disable=no-member
|
||||
id=six.text_type(pub_ip_data.id), # pylint: disable=no-member
|
||||
)
|
||||
ip_configurations = [
|
||||
NetworkInterfaceIPConfiguration(
|
||||
|
|
|
@ -134,7 +134,8 @@ def __virtual__():
|
|||
|
||||
parameters = details['gce']
|
||||
pathname = os.path.expanduser(parameters['service_account_private_key'])
|
||||
if salt.utils.cloud.check_key_path_and_mode(
|
||||
# empty pathname will tell libcloud to use instance credentials
|
||||
if pathname and salt.utils.cloud.check_key_path_and_mode(
|
||||
provider, pathname
|
||||
) is False:
|
||||
return False
|
||||
|
|
|
@ -658,8 +658,6 @@ def _get_properties(path="", method="GET", forced_params=None):
|
|||
props = sub['info'][method]['parameters']['properties'].keys()
|
||||
except KeyError as exc:
|
||||
log.error('method not found: "%s"', exc)
|
||||
except:
|
||||
raise
|
||||
for prop in props:
|
||||
numerical = re.match(r'(\w+)\[n\]', prop)
|
||||
# generate (arbitrarily) 10 properties for duplicatable properties identified by:
|
||||
|
@ -728,6 +726,9 @@ def create_node(vm_, newid):
|
|||
if prop in vm_: # if the property is set, use it for the VM request
|
||||
newnode[prop] = vm_[prop]
|
||||
|
||||
if 'pubkey' in vm_:
|
||||
newnode['ssh-public-keys'] = vm_['pubkey']
|
||||
|
||||
# inform user the "disk" option is not supported for LXC hosts
|
||||
if 'disk' in vm_:
|
||||
log.warning('The "disk" option is not supported for LXC hosts and was ignored')
|
||||
|
@ -893,7 +894,7 @@ def destroy(name, call=None):
|
|||
|
||||
# required to wait a bit here, otherwise the VM is sometimes
|
||||
# still locked and destroy fails.
|
||||
time.sleep(1)
|
||||
time.sleep(3)
|
||||
|
||||
query('delete', 'nodes/{0}/{1}'.format(
|
||||
vmobj['node'], vmobj['id']
|
||||
|
|
|
@ -27,10 +27,7 @@ import salt.utils.cloud
|
|||
import salt.config as config
|
||||
import salt.client
|
||||
import salt.ext.six as six
|
||||
if six.PY3:
|
||||
import ipaddress
|
||||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
from salt._compat import ipaddress
|
||||
|
||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
||||
|
||||
|
|
|
@ -25,13 +25,8 @@ import tempfile
|
|||
import salt.utils
|
||||
import salt.config as config
|
||||
import salt.client
|
||||
import salt.ext.six as six
|
||||
if six.PY3:
|
||||
import ipaddress
|
||||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit, \
|
||||
SaltInvocationError
|
||||
from salt._compat import ipaddress
|
||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit, SaltInvocationError
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
|
|
@ -674,6 +674,19 @@ def _get_size_spec(device, size_gb=None, size_kb=None):
|
|||
return disk_spec
|
||||
|
||||
|
||||
def _iter_disk_unit_number(unit_number):
|
||||
'''
|
||||
Apparently vmware reserves ID 7 for SCSI controllers, so we cannot specify
|
||||
hard drives for 7.
|
||||
|
||||
Skip 7 to make sure.
|
||||
'''
|
||||
unit_number += 1
|
||||
if unit_number == 7:
|
||||
unit_number += 1
|
||||
return unit_number
|
||||
|
||||
|
||||
def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None):
|
||||
unit_number = 0
|
||||
bus_number = 0
|
||||
|
@ -695,7 +708,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None):
|
|||
# this is a hard disk
|
||||
if 'disk' in list(devices.keys()):
|
||||
# there is atleast one disk specified to be created/configured
|
||||
unit_number += 1
|
||||
unit_number = _iter_disk_unit_number(unit_number)
|
||||
existing_disks_label.append(device.deviceInfo.label)
|
||||
if device.deviceInfo.label in list(devices['disk'].keys()):
|
||||
disk_spec = None
|
||||
|
@ -862,7 +875,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None):
|
|||
break
|
||||
|
||||
device_specs.append(disk_spec)
|
||||
unit_number += 1
|
||||
unit_number = _iter_disk_unit_number(unit_number)
|
||||
|
||||
if 'cd' in list(devices.keys()):
|
||||
cd_drives_to_create = list(set(devices['cd'].keys()) - set(existing_cd_drives_label))
|
||||
|
|
|
@ -93,6 +93,7 @@ def _gather_buffer_space():
|
|||
# Return the higher number between 5% of the system memory and 10MiB
|
||||
return max([total_mem * 0.05, 10 << 20])
|
||||
|
||||
|
||||
# For the time being this will be a fixed calculation
|
||||
# TODO: Allow user configuration
|
||||
_DFLT_IPC_WBUFFER = _gather_buffer_space() * .5
|
||||
|
@ -373,7 +374,7 @@ VALID_OPTS = {
|
|||
'ipc_mode': six.string_types,
|
||||
|
||||
# Enable ipv6 support for daemons
|
||||
'ipv6': bool,
|
||||
'ipv6': (type(None), bool),
|
||||
|
||||
# The chunk size to use when streaming files with the file server
|
||||
'file_buffer_size': int,
|
||||
|
@ -442,6 +443,9 @@ VALID_OPTS = {
|
|||
# Tell the loader to attempt to import *.pyx cython files if cython is available
|
||||
'cython_enable': bool,
|
||||
|
||||
# Whether or not to load grains for the GPU
|
||||
'enable_gpu_grains': bool,
|
||||
|
||||
# Tell the loader to attempt to import *.zip archives
|
||||
'enable_zip_modules': bool,
|
||||
|
||||
|
@ -515,6 +519,7 @@ VALID_OPTS = {
|
|||
# The number of seconds to sleep between retrying an attempt to resolve the hostname of a
|
||||
# salt master
|
||||
'retry_dns': float,
|
||||
'retry_dns_count': (type(None), int),
|
||||
|
||||
# In the case when the resolve of the salt master hostname fails, fall back to localhost
|
||||
'resolve_dns_fallback': bool,
|
||||
|
@ -1354,7 +1359,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'mine_interval': 60,
|
||||
'ipc_mode': _DFLT_IPC_MODE,
|
||||
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
|
||||
'ipv6': False,
|
||||
'ipv6': None,
|
||||
'file_buffer_size': 262144,
|
||||
'tcp_pub_port': 4510,
|
||||
'tcp_pull_port': 4511,
|
||||
|
@ -1373,6 +1378,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'test': False,
|
||||
'ext_job_cache': '',
|
||||
'cython_enable': False,
|
||||
'enable_gpu_grains': True,
|
||||
'enable_zip_modules': False,
|
||||
'state_verbose': True,
|
||||
'state_output': 'full',
|
||||
|
@ -1393,6 +1399,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'update_url': False,
|
||||
'update_restart_services': [],
|
||||
'retry_dns': 30,
|
||||
'retry_dns_count': None,
|
||||
'resolve_dns_fallback': True,
|
||||
'recon_max': 10000,
|
||||
'recon_default': 1000,
|
||||
|
@ -1679,7 +1686,7 @@ DEFAULT_MASTER_OPTS = {
|
|||
'enforce_mine_cache': False,
|
||||
'ipc_mode': _DFLT_IPC_MODE,
|
||||
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
|
||||
'ipv6': False,
|
||||
'ipv6': None,
|
||||
'tcp_master_pub_port': 4512,
|
||||
'tcp_master_pull_port': 4513,
|
||||
'tcp_master_publish_pull': 4514,
|
||||
|
@ -3482,7 +3489,7 @@ def check_driver_dependencies(driver, dependencies):
|
|||
if value is False:
|
||||
log.warning(
|
||||
"Missing dependency: '%s'. The %s driver requires "
|
||||
"'%s' to be installed.", key, key, driver
|
||||
"'%s' to be installed.", key, driver, key
|
||||
)
|
||||
ret = False
|
||||
|
||||
|
|
|
@ -1212,9 +1212,17 @@ class SAuth(AsyncAuth):
|
|||
creds = self.sign_in(channel=channel)
|
||||
if creds == 'retry':
|
||||
if self.opts.get('caller'):
|
||||
print('Minion failed to authenticate with the master, '
|
||||
'has the minion key been accepted?')
|
||||
sys.exit(2)
|
||||
# We have a list of masters, so we should break
|
||||
# and try the next one in the list.
|
||||
if self.opts.get('local_masters', None):
|
||||
error = SaltClientError('Minion failed to authenticate'
|
||||
' with the master, has the '
|
||||
'minion key been accepted?')
|
||||
break
|
||||
else:
|
||||
print('Minion failed to authenticate with the master, '
|
||||
'has the minion key been accepted?')
|
||||
sys.exit(2)
|
||||
if acceptance_wait_time:
|
||||
log.info('Waiting %s seconds before retry.', acceptance_wait_time)
|
||||
time.sleep(acceptance_wait_time)
|
||||
|
|
|
@ -6,7 +6,14 @@ Minion enabling different transports.
|
|||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
# Import Python Libs
|
||||
import sys
|
||||
from collections import namedtuple, Iterable, Sequence, Mapping
|
||||
|
||||
try:
|
||||
from collections.abc import Iterable, Sequence, Mapping
|
||||
except ImportError:
|
||||
from collections import Iterable, Sequence, Mapping
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
import logging
|
||||
|
||||
# Import Salt Libs
|
||||
|
|
|
@ -30,16 +30,16 @@ import salt.utils.event
|
|||
# Import third-party libs
|
||||
try:
|
||||
import logstash
|
||||
HAS_LOGSTASH = True
|
||||
except ImportError:
|
||||
HAS_LOGSTASH = False
|
||||
logstash = None
|
||||
|
||||
__virtualname__ = 'logstash'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if not HAS_LOGSTASH:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
return __virtualname__ \
|
||||
if logstash is not None \
|
||||
else (False, 'python-logstash not installed')
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
|
@ -63,7 +63,7 @@ class Listener(object):
|
|||
tag = 'salt/engine/redis_sentinel'
|
||||
super(Listener, self).__init__()
|
||||
self.tag = tag
|
||||
self.redis = redis.StrictRedis(host=host, port=port)
|
||||
self.redis = redis.StrictRedis(host=host, port=port, decode_responses=True)
|
||||
self.pubsub = self.redis.pubsub()
|
||||
self.pubsub.psubscribe(channels)
|
||||
self.fire_master = salt.utils.event.get_master_event(__opts__, __opts__['sock_dir']).fire_event
|
||||
|
|
|
@ -593,7 +593,7 @@ class SlackClient(object):
|
|||
Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target
|
||||
|
||||
'''
|
||||
# Default to targetting all minions with a type of glob
|
||||
# Default to targeting all minions with a type of glob
|
||||
null_target = {'target': '*', 'tgt_type': 'glob'}
|
||||
|
||||
def check_cmd_against_group(cmd):
|
||||
|
@ -627,14 +627,12 @@ class SlackClient(object):
|
|||
return checked
|
||||
return null_target
|
||||
|
||||
|
||||
# emulate the yaml_out output formatter. It relies on a global __opts__ object which we can't
|
||||
# obviously pass in
|
||||
|
||||
def format_return_text(self, data, function, **kwargs): # pylint: disable=unused-argument
|
||||
'''
|
||||
Print out YAML using the block mode
|
||||
'''
|
||||
# emulate the yaml_out output formatter. It relies on a global __opts__ object which
|
||||
# we can't obviously pass in
|
||||
try:
|
||||
# Format results from state runs with highstate output
|
||||
if function.startswith('state'):
|
||||
|
|
|
@ -96,6 +96,12 @@ class SaltSyndicMasterError(SaltException):
|
|||
'''
|
||||
|
||||
|
||||
class SaltMasterUnresolvableError(SaltException):
|
||||
'''
|
||||
Problem resolving the name of the Salt master
|
||||
'''
|
||||
|
||||
|
||||
class MasterExit(SystemExit):
|
||||
'''
|
||||
Rise when the master exits
|
||||
|
|
|
@ -7,6 +7,8 @@ from __future__ import absolute_import, print_function, unicode_literals
|
|||
import time
|
||||
import logging
|
||||
|
||||
import salt.utils.stringutils
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
_DEFAULT_SPLAYTIME = 300
|
||||
|
@ -28,7 +30,7 @@ def _get_hash():
|
|||
bitmask = 0xffffffff
|
||||
h = 0
|
||||
|
||||
for i in bytearray(__grains__['id']):
|
||||
for i in bytearray(salt.utils.stringutils.to_bytes(__grains__['id'])):
|
||||
h = (h + i) & bitmask
|
||||
h = (h + (h << 10)) & bitmask
|
||||
h = (h ^ (h >> 6)) & bitmask
|
||||
|
|
|
@ -201,7 +201,7 @@ def v4_int_to_packed(address):
|
|||
"""
|
||||
try:
|
||||
return _int_to_bytes(address, 4, 'big')
|
||||
except:
|
||||
except Exception:
|
||||
raise ValueError("Address negative or too large for IPv4")
|
||||
|
||||
|
||||
|
@ -217,7 +217,7 @@ def v6_int_to_packed(address):
|
|||
"""
|
||||
try:
|
||||
return _int_to_bytes(address, 16, 'big')
|
||||
except:
|
||||
except Exception:
|
||||
raise ValueError("Address negative or too large for IPv6")
|
||||
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ from __future__ import absolute_import
|
|||
import socket
|
||||
import ctypes
|
||||
import os
|
||||
import ipaddress
|
||||
from salt._compat import ipaddress
|
||||
import salt.ext.six as six
|
||||
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ def get_file_client(opts, pillar=False):
|
|||
return {
|
||||
'remote': RemoteClient,
|
||||
'local': FSClient,
|
||||
'pillar': LocalClient,
|
||||
'pillar': PillarClient,
|
||||
}.get(client, RemoteClient)(opts)
|
||||
|
||||
|
||||
|
@ -347,58 +347,17 @@ class Client(object):
|
|||
Return a list of all available sls modules on the master for a given
|
||||
environment
|
||||
'''
|
||||
|
||||
limit_traversal = self.opts.get('fileserver_limit_traversal', False)
|
||||
states = []
|
||||
|
||||
if limit_traversal:
|
||||
if saltenv not in self.opts['file_roots']:
|
||||
log.warning(
|
||||
'During an attempt to list states for saltenv \'%s\', '
|
||||
'the environment could not be found in the configured '
|
||||
'file roots', saltenv
|
||||
)
|
||||
return states
|
||||
for path in self.opts['file_roots'][saltenv]:
|
||||
for root, dirs, files in os.walk(path, topdown=True): # future lint: disable=blacklisted-function
|
||||
root = salt.utils.data.decode(root)
|
||||
files = salt.utils.data.decode(files)
|
||||
log.debug(
|
||||
'Searching for states in dirs %s and files %s',
|
||||
salt.utils.data.decode(dirs), files
|
||||
)
|
||||
if not [filename.endswith('.sls') for filename in files]:
|
||||
# Use shallow copy so we don't disturb the memory used
|
||||
# by os.walk. Otherwise this breaks!
|
||||
del dirs[:]
|
||||
else:
|
||||
for found_file in files:
|
||||
stripped_root = os.path.relpath(root, path)
|
||||
if salt.utils.platform.is_windows():
|
||||
stripped_root = stripped_root.replace('\\', '/')
|
||||
stripped_root = stripped_root.replace('/', '.')
|
||||
if found_file.endswith(('.sls')):
|
||||
if found_file.endswith('init.sls'):
|
||||
if stripped_root.endswith('.'):
|
||||
stripped_root = stripped_root.rstrip('.')
|
||||
states.append(stripped_root)
|
||||
else:
|
||||
if not stripped_root.endswith('.'):
|
||||
stripped_root += '.'
|
||||
if stripped_root.startswith('.'):
|
||||
stripped_root = stripped_root.lstrip('.')
|
||||
states.append(stripped_root + found_file[:-4])
|
||||
else:
|
||||
for path in self.file_list(saltenv):
|
||||
if salt.utils.platform.is_windows():
|
||||
path = path.replace('\\', '/')
|
||||
if path.endswith('.sls'):
|
||||
# is an sls module!
|
||||
if path.endswith('/init.sls'):
|
||||
states.append(path.replace('/', '.')[:-9])
|
||||
else:
|
||||
states.append(path.replace('/', '.')[:-4])
|
||||
return states
|
||||
states = set()
|
||||
for path in self.file_list(saltenv):
|
||||
if salt.utils.platform.is_windows():
|
||||
path = path.replace('\\', '/')
|
||||
if path.endswith('.sls'):
|
||||
# is an sls module!
|
||||
if path.endswith('/init.sls'):
|
||||
states.add(path.replace('/', '.')[:-9])
|
||||
else:
|
||||
states.add(path.replace('/', '.')[:-4])
|
||||
return sorted(states)
|
||||
|
||||
def get_state(self, sls, saltenv, cachedir=None):
|
||||
'''
|
||||
|
@ -844,13 +803,10 @@ class Client(object):
|
|||
)
|
||||
|
||||
|
||||
class LocalClient(Client):
|
||||
class PillarClient(Client):
|
||||
'''
|
||||
Use the local_roots option to parse a local file root
|
||||
Used by pillar to handle fileclient requests
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
Client.__init__(self, opts)
|
||||
|
||||
def _find_file(self, path, saltenv='base'):
|
||||
'''
|
||||
Locate the file path
|
||||
|
@ -858,12 +814,12 @@ class LocalClient(Client):
|
|||
fnd = {'path': '',
|
||||
'rel': ''}
|
||||
|
||||
if saltenv not in self.opts['file_roots']:
|
||||
if saltenv not in self.opts['pillar_roots']:
|
||||
return fnd
|
||||
if salt.utils.url.is_escaped(path):
|
||||
# The path arguments are escaped
|
||||
path = salt.utils.url.unescape(path)
|
||||
for root in self.opts['file_roots'][saltenv]:
|
||||
for root in self.opts['pillar_roots'][saltenv]:
|
||||
full = os.path.join(root, path)
|
||||
if os.path.isfile(full):
|
||||
fnd['path'] = full
|
||||
|
@ -896,10 +852,10 @@ class LocalClient(Client):
|
|||
with optional relative prefix path to limit directory traversal
|
||||
'''
|
||||
ret = []
|
||||
if saltenv not in self.opts['file_roots']:
|
||||
if saltenv not in self.opts['pillar_roots']:
|
||||
return ret
|
||||
prefix = prefix.strip('/')
|
||||
for path in self.opts['file_roots'][saltenv]:
|
||||
for path in self.opts['pillar_roots'][saltenv]:
|
||||
for root, dirs, files in salt.utils.path.os_walk(
|
||||
os.path.join(path, prefix), followlinks=True
|
||||
):
|
||||
|
@ -912,14 +868,14 @@ class LocalClient(Client):
|
|||
|
||||
def file_list_emptydirs(self, saltenv='base', prefix=''):
|
||||
'''
|
||||
List the empty dirs in the file_roots
|
||||
List the empty dirs in the pillar_roots
|
||||
with optional relative prefix path to limit directory traversal
|
||||
'''
|
||||
ret = []
|
||||
prefix = prefix.strip('/')
|
||||
if saltenv not in self.opts['file_roots']:
|
||||
if saltenv not in self.opts['pillar_roots']:
|
||||
return ret
|
||||
for path in self.opts['file_roots'][saltenv]:
|
||||
for path in self.opts['pillar_roots'][saltenv]:
|
||||
for root, dirs, files in salt.utils.path.os_walk(
|
||||
os.path.join(path, prefix), followlinks=True
|
||||
):
|
||||
|
@ -931,14 +887,14 @@ class LocalClient(Client):
|
|||
|
||||
def dir_list(self, saltenv='base', prefix=''):
|
||||
'''
|
||||
List the dirs in the file_roots
|
||||
List the dirs in the pillar_roots
|
||||
with optional relative prefix path to limit directory traversal
|
||||
'''
|
||||
ret = []
|
||||
if saltenv not in self.opts['file_roots']:
|
||||
if saltenv not in self.opts['pillar_roots']:
|
||||
return ret
|
||||
prefix = prefix.strip('/')
|
||||
for path in self.opts['file_roots'][saltenv]:
|
||||
for path in self.opts['pillar_roots'][saltenv]:
|
||||
for root, dirs, files in salt.utils.path.os_walk(
|
||||
os.path.join(path, prefix), followlinks=True
|
||||
):
|
||||
|
@ -965,7 +921,7 @@ class LocalClient(Client):
|
|||
|
||||
def hash_file(self, path, saltenv='base'):
|
||||
'''
|
||||
Return the hash of a file, to get the hash of a file in the file_roots
|
||||
Return the hash of a file, to get the hash of a file in the pillar_roots
|
||||
prepend the path with salt://<file on server> otherwise, prepend the
|
||||
file with / for a local file.
|
||||
'''
|
||||
|
@ -988,7 +944,7 @@ class LocalClient(Client):
|
|||
|
||||
def hash_and_stat_file(self, path, saltenv='base'):
|
||||
'''
|
||||
Return the hash of a file, to get the hash of a file in the file_roots
|
||||
Return the hash of a file, to get the hash of a file in the pillar_roots
|
||||
prepend the path with salt://<file on server> otherwise, prepend the
|
||||
file with / for a local file.
|
||||
|
||||
|
@ -1034,7 +990,7 @@ class LocalClient(Client):
|
|||
Return the available environments
|
||||
'''
|
||||
ret = []
|
||||
for saltenv in self.opts['file_roots']:
|
||||
for saltenv in self.opts['pillar_roots']:
|
||||
ret.append(saltenv)
|
||||
return ret
|
||||
|
||||
|
@ -1428,6 +1384,11 @@ class FSClient(RemoteClient):
|
|||
self.auth = DumbAuth()
|
||||
|
||||
|
||||
# Provide backward compatibility for anyone directly using LocalClient (but no
|
||||
# one should be doing this).
|
||||
LocalClient = FSClient
|
||||
|
||||
|
||||
class DumbAuth(object):
|
||||
'''
|
||||
The dumbauth class is used to stub out auth calls fired from the FSClient
|
||||
|
|
|
@ -5,12 +5,13 @@ File server pluggable modules and generic backend functions
|
|||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
import collections
|
||||
|
||||
import errno
|
||||
import fnmatch
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
|
@ -23,6 +24,11 @@ import salt.utils.versions
|
|||
from salt.utils.args import get_function_argspec as _argspec
|
||||
from salt.utils.decorators import ensure_unicode_args
|
||||
|
||||
try:
|
||||
from collections.abc import Sequence
|
||||
except ImportError:
|
||||
from collections import Sequence
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
||||
|
@ -125,16 +131,29 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
|
|||
if os.path.exists(list_cache):
|
||||
# calculate filelist age is possible
|
||||
cache_stat = os.stat(list_cache)
|
||||
age = time.time() - cache_stat.st_mtime
|
||||
# st_time can have a greater precision than time, removing
|
||||
# float precision makes sure age will never be a negative
|
||||
# number.
|
||||
current_time = int(time.time())
|
||||
file_mtime = int(cache_stat.st_mtime)
|
||||
if file_mtime > current_time:
|
||||
log.debug(
|
||||
'Cache file modified time is in the future, ignoring. '
|
||||
'file=%s mtime=%s current_time=%s',
|
||||
list_cache, current_time, file_mtime
|
||||
)
|
||||
age = 0
|
||||
else:
|
||||
age = current_time - file_mtime
|
||||
else:
|
||||
# if filelist does not exists yet, mark it as expired
|
||||
age = opts.get('fileserver_list_cache_time', 20) + 1
|
||||
if age < opts.get('fileserver_list_cache_time', 20):
|
||||
# Young enough! Load this sucker up!
|
||||
with salt.utils.files.fopen(list_cache, 'rb') as fp_:
|
||||
log.trace(
|
||||
'Returning file_lists cache data from %s',
|
||||
list_cache
|
||||
log.debug(
|
||||
"Returning file list from cache: age=%s cache_time=%s %s",
|
||||
age, opts.get('fileserver_list_cache_time', 20), list_cache
|
||||
)
|
||||
return salt.utils.data.decode(serial.load(fp_).get(form, [])), False, False
|
||||
elif _lock_cache(w_lock):
|
||||
|
@ -187,12 +206,14 @@ def generate_mtime_map(opts, path_map):
|
|||
file_map = {}
|
||||
for saltenv, path_list in six.iteritems(path_map):
|
||||
for path in path_list:
|
||||
for directory, dirnames, filenames in salt.utils.path.os_walk(path):
|
||||
# Don't walk any directories that match file_ignore_regex or glob
|
||||
dirnames[:] = [d for d in dirnames if not is_file_ignored(opts, d)]
|
||||
for directory, _, filenames in salt.utils.path.os_walk(path):
|
||||
for item in filenames:
|
||||
try:
|
||||
file_path = os.path.join(directory, item)
|
||||
# Don't walk any directories that match
|
||||
# file_ignore_regex or glob
|
||||
if is_file_ignored(opts, file_path):
|
||||
continue
|
||||
file_map[file_path] = os.path.getmtime(file_path)
|
||||
except (OSError, IOError):
|
||||
# skip dangling symlinks
|
||||
|
@ -339,7 +360,7 @@ class Fileserver(object):
|
|||
except AttributeError:
|
||||
back = six.text_type(back).split(',')
|
||||
|
||||
if isinstance(back, collections.Sequence):
|
||||
if isinstance(back, Sequence):
|
||||
# The test suite uses an ImmutableList type (based on
|
||||
# collections.Sequence) for lists, which breaks this function in
|
||||
# the test suite. This normalizes the value from the opts into a
|
||||
|
@ -350,6 +371,10 @@ class Fileserver(object):
|
|||
if not isinstance(back, list):
|
||||
return ret
|
||||
|
||||
# Avoid error logging when performing lookups in the LazyDict by
|
||||
# instead doing the membership check on the result of a call to its
|
||||
# .keys() attribute rather than on the LaztDict itself.
|
||||
server_funcs = self.servers.keys()
|
||||
try:
|
||||
subtract_only = all((x.startswith('-') for x in back))
|
||||
except AttributeError:
|
||||
|
@ -359,16 +384,16 @@ class Fileserver(object):
|
|||
# Only subtracting backends from enabled ones
|
||||
ret = self.opts['fileserver_backend']
|
||||
for sub in back:
|
||||
if '{0}.envs'.format(sub[1:]) in self.servers:
|
||||
if '{0}.envs'.format(sub[1:]) in server_funcs:
|
||||
ret.remove(sub[1:])
|
||||
elif '{0}.envs'.format(sub[1:-2]) in self.servers:
|
||||
elif '{0}.envs'.format(sub[1:-2]) in server_funcs:
|
||||
ret.remove(sub[1:-2])
|
||||
return ret
|
||||
|
||||
for sub in back:
|
||||
if '{0}.envs'.format(sub) in self.servers:
|
||||
if '{0}.envs'.format(sub) in server_funcs:
|
||||
ret.append(sub)
|
||||
elif '{0}.envs'.format(sub[:-2]) in self.servers:
|
||||
elif '{0}.envs'.format(sub[:-2]) in server_funcs:
|
||||
ret.append(sub[:-2])
|
||||
return ret
|
||||
|
||||
|
@ -509,6 +534,15 @@ class Fileserver(object):
|
|||
return ret
|
||||
return list(ret)
|
||||
|
||||
def file_envs(self, load=None):
|
||||
'''
|
||||
Return environments for all backends for requests from fileclient
|
||||
'''
|
||||
if load is None:
|
||||
load = {}
|
||||
load.pop('cmd', None)
|
||||
return self.envs(**load)
|
||||
|
||||
def init(self, back=None):
|
||||
'''
|
||||
Initialize the backend, only do so if the fs supports an init function
|
||||
|
@ -863,8 +897,6 @@ class FSChan(object):
|
|||
cmd = load['cmd'].lstrip('_')
|
||||
if cmd in self.cmd_stub:
|
||||
return self.cmd_stub[cmd]
|
||||
if cmd == 'file_envs':
|
||||
return self.fs.envs()
|
||||
if not hasattr(self.fs, cmd):
|
||||
log.error('Malformed request, invalid cmd: %s', load)
|
||||
return {}
|
||||
|
|
|
@ -161,7 +161,7 @@ def update():
|
|||
old_mtime_map = {}
|
||||
# if you have an old map, load that
|
||||
if os.path.exists(mtime_map_path):
|
||||
with salt.utils.files.fopen(mtime_map_path, 'r') as fp_:
|
||||
with salt.utils.files.fopen(mtime_map_path, 'rb') as fp_:
|
||||
for line in fp_:
|
||||
line = salt.utils.stringutils.to_unicode(line)
|
||||
try:
|
||||
|
@ -189,10 +189,10 @@ def update():
|
|||
mtime_map_path_dir = os.path.dirname(mtime_map_path)
|
||||
if not os.path.exists(mtime_map_path_dir):
|
||||
os.makedirs(mtime_map_path_dir)
|
||||
with salt.utils.files.fopen(mtime_map_path, 'w') as fp_:
|
||||
with salt.utils.files.fopen(mtime_map_path, 'wb') as fp_:
|
||||
for file_path, mtime in six.iteritems(new_mtime_map):
|
||||
fp_.write(
|
||||
salt.utils.stringutils.to_str(
|
||||
salt.utils.stringutils.to_bytes(
|
||||
'{0}:{1}\n'.format(file_path, mtime)
|
||||
)
|
||||
)
|
||||
|
@ -240,7 +240,7 @@ def file_hash(load, fnd):
|
|||
# if we have a cache, serve that if the mtime hasn't changed
|
||||
if os.path.exists(cache_path):
|
||||
try:
|
||||
with salt.utils.files.fopen(cache_path, 'r') as fp_:
|
||||
with salt.utils.files.fopen(cache_path, 'rb') as fp_:
|
||||
try:
|
||||
hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(':')
|
||||
except ValueError:
|
||||
|
|
|
@ -5,9 +5,11 @@ Generate chronos proxy minion grains.
|
|||
.. versionadded:: 2015.8.2
|
||||
|
||||
'''
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils.http
|
||||
import salt.utils.platform
|
||||
__proxyenabled__ = ['chronos']
|
||||
|
|
|
@ -46,6 +46,7 @@ import salt.utils.files
|
|||
import salt.utils.network
|
||||
import salt.utils.path
|
||||
import salt.utils.platform
|
||||
import salt.utils.pkg.rpm
|
||||
from salt.ext import six
|
||||
from salt.ext.six.moves import range
|
||||
|
||||
|
@ -609,6 +610,8 @@ def _windows_virtual(osdata):
|
|||
if osdata['kernel'] != 'Windows':
|
||||
return grains
|
||||
|
||||
grains['virtual'] = 'physical'
|
||||
|
||||
# It is possible that the 'manufacturer' and/or 'productname' grains
|
||||
# exist but have a value of None.
|
||||
manufacturer = osdata.get('manufacturer', '')
|
||||
|
@ -906,7 +909,7 @@ def _virtual(osdata):
|
|||
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
|
||||
grains['virtual_subtype'] = 'Xen Dom0'
|
||||
else:
|
||||
if grains.get('productname', '') == 'HVM domU':
|
||||
if osdata.get('productname', '') == 'HVM domU':
|
||||
# Requires dmidecode!
|
||||
grains['virtual_subtype'] = 'Xen HVM DomU'
|
||||
elif os.path.isfile('/proc/xen/capabilities') and \
|
||||
|
@ -923,9 +926,8 @@ def _virtual(osdata):
|
|||
elif isdir('/sys/bus/xen'):
|
||||
if 'xen:' in __salt__['cmd.run']('dmesg').lower():
|
||||
grains['virtual_subtype'] = 'Xen PV DomU'
|
||||
elif os.listdir('/sys/bus/xen/drivers'):
|
||||
# An actual DomU will have several drivers
|
||||
# whereas a paravirt ops kernel will not.
|
||||
elif os.path.isfile('/sys/bus/xen/drivers/xenconsole'):
|
||||
# An actual DomU will have the xenconsole driver
|
||||
grains['virtual_subtype'] = 'Xen PV DomU'
|
||||
# If a Dom0 or DomU was detected, obviously this is xen
|
||||
if 'dom' in grains.get('virtual_subtype', '').lower():
|
||||
|
@ -1269,6 +1271,7 @@ def id_():
|
|||
'''
|
||||
return {'id': __opts__.get('id', '')}
|
||||
|
||||
|
||||
_REPLACE_LINUX_RE = re.compile(r'\W(?:gnu/)?linux', re.IGNORECASE)
|
||||
|
||||
# This maps (at most) the first ten characters (no spaces, lowercased) of
|
||||
|
@ -1326,6 +1329,7 @@ _OS_FAMILY_MAP = {
|
|||
'OVS': 'RedHat',
|
||||
'OEL': 'RedHat',
|
||||
'XCP': 'RedHat',
|
||||
'XCP-ng': 'RedHat',
|
||||
'XenServer': 'RedHat',
|
||||
'RES': 'RedHat',
|
||||
'Sangoma': 'RedHat',
|
||||
|
@ -1360,6 +1364,7 @@ _OS_FAMILY_MAP = {
|
|||
'GCEL': 'Debian',
|
||||
'Linaro': 'Debian',
|
||||
'elementary OS': 'Debian',
|
||||
'elementary': 'Debian',
|
||||
'Univention': 'Debian',
|
||||
'ScientificLinux': 'RedHat',
|
||||
'Raspbian': 'Debian',
|
||||
|
@ -1466,6 +1471,34 @@ def _parse_os_release(*os_release_files):
|
|||
return ret
|
||||
|
||||
|
||||
def _parse_cpe_name(cpe):
|
||||
'''
|
||||
Parse CPE_NAME data from the os-release
|
||||
|
||||
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
|
||||
|
||||
:param cpe:
|
||||
:return:
|
||||
'''
|
||||
part = {
|
||||
'o': 'operating system',
|
||||
'h': 'hardware',
|
||||
'a': 'application',
|
||||
}
|
||||
ret = {}
|
||||
cpe = (cpe or '').split(':')
|
||||
if len(cpe) > 4 and cpe[0] == 'cpe':
|
||||
if cpe[1].startswith('/'): # WFN to URI
|
||||
ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
|
||||
ret['phase'] = cpe[5] if len(cpe) > 5 else None
|
||||
ret['part'] = part.get(cpe[1][1:])
|
||||
elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string
|
||||
ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
|
||||
ret['part'] = part.get(cpe[2])
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def os_data():
|
||||
'''
|
||||
Return grains pertaining to the operating system
|
||||
|
@ -1657,13 +1690,20 @@ def os_data():
|
|||
codename = codename_match.group(1)
|
||||
grains['lsb_distrib_codename'] = codename
|
||||
if 'CPE_NAME' in os_release:
|
||||
if ":suse:" in os_release['CPE_NAME'] or ":opensuse:" in os_release['CPE_NAME']:
|
||||
cpe = _parse_cpe_name(os_release['CPE_NAME'])
|
||||
if not cpe:
|
||||
log.error('Broken CPE_NAME format in /etc/os-release!')
|
||||
elif cpe.get('vendor', '').lower() in ['suse', 'opensuse']:
|
||||
grains['os'] = "SUSE"
|
||||
# openSUSE `osfullname` grain normalization
|
||||
if os_release.get("NAME") == "openSUSE Leap":
|
||||
grains['osfullname'] = "Leap"
|
||||
elif os_release.get("VERSION") == "Tumbleweed":
|
||||
grains['osfullname'] = os_release["VERSION"]
|
||||
# Override VERSION_ID, if CPE_NAME around
|
||||
if cpe.get('version') and cpe.get('vendor') == 'opensuse': # Keep VERSION_ID for SLES
|
||||
grains['lsb_distrib_release'] = cpe['version']
|
||||
|
||||
elif os.path.isfile('/etc/SuSE-release'):
|
||||
log.trace('Parsing distrib info from /etc/SuSE-release')
|
||||
grains['lsb_distrib_id'] = 'SUSE'
|
||||
|
@ -1769,8 +1809,7 @@ def os_data():
|
|||
# Commit introducing this comment should be reverted after the upstream bug is released.
|
||||
if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
|
||||
grains.pop('lsb_distrib_release', None)
|
||||
grains['osrelease'] = \
|
||||
grains.get('lsb_distrib_release', osrelease).strip()
|
||||
grains['osrelease'] = grains.get('lsb_distrib_release', osrelease).strip()
|
||||
grains['oscodename'] = grains.get('lsb_distrib_codename', '').strip() or oscodename
|
||||
if 'Red Hat' in grains['oscodename']:
|
||||
grains['oscodename'] = oscodename
|
||||
|
@ -1808,8 +1847,7 @@ def os_data():
|
|||
r'((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?'
|
||||
r'\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?'
|
||||
)
|
||||
osname, development, osmajorrelease, osminorrelease = \
|
||||
release_re.search(rel_data).groups()
|
||||
osname, development, osmajorrelease, osminorrelease = release_re.search(rel_data).groups()
|
||||
except AttributeError:
|
||||
# Set a blank osrelease grain and fallback to 'Solaris'
|
||||
# as the 'os' grain.
|
||||
|
@ -1896,8 +1934,8 @@ def os_data():
|
|||
# architecture.
|
||||
if grains.get('os_family') == 'Debian':
|
||||
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
|
||||
elif grains.get('os_family') == 'RedHat':
|
||||
osarch = __salt__['cmd.run']('rpm --eval %{_host_cpu}').strip()
|
||||
elif grains.get('os_family') in ['RedHat', 'Suse']:
|
||||
osarch = salt.utils.pkg.rpm.get_osarch()
|
||||
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
|
||||
archinfo = {}
|
||||
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
|
||||
|
@ -2515,7 +2553,7 @@ def _hw_data(osdata):
|
|||
break
|
||||
elif osdata['kernel'] == 'AIX':
|
||||
cmd = salt.utils.path.which('prtconf')
|
||||
if data:
|
||||
if cmd:
|
||||
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
|
||||
for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
|
||||
('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
'''
|
||||
Generate baseline proxy minion grains for ESXi hosts.
|
||||
|
||||
., versionadded:: 2015.8.4
|
||||
.. versionadded:: 2015.8.4
|
||||
|
||||
'''
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue