Mirror of https://github.com/saltstack/salt.git
Merge 3006.x into master
Commit e7b5f24ab3: 51 changed files with 2505 additions and 1871 deletions.
.github/workflows/ci.yml (58 lines changed)

Every platform test job gains two new settings, workflow-slug and default-timeout, next to the existing cache-prefix, skip-code-coverage, and skip-junit-reports values. A representative hunk:

@@ -1868,6 +1868,8 @@ jobs:
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.13
       skip-code-coverage: ${{ fromJSON(needs.prepare-workflow.outputs.testrun)['skip_code_coverage'] }}
       skip-junit-reports: ${{ github.event_name == 'pull_request' }}
+      workflow-slug: ci
+      default-timeout: 180

   windows-2019:
     name: Windows 2019 Test

The same two lines are added to each test job in 29 hunks spanning lines 1868 to 2428 of the file. The job headers visible in the diff are: windows-2019, windows-2022, macos-12, almalinux-8, almalinux-9, amazonlinux-2, archlinux-lts, centos-7, centosstream-8, centosstream-9, debian-10, debian-11, debian-11-arm64, debian-12, debian-12-arm64, fedora-37, fedora-38, opensuse-15, photonos-3, photonos-3-arm64, photonos-4, photonos-4-arm64, photonos-5, photonos-5-arm64, ubuntu-2004, ubuntu-2004-arm64, ubuntu-2204, ubuntu-2204-arm64, and finally combine-all-code-coverage (Combine Code Coverage).
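The skip-code-coverage and skip-junit-reports values above are GitHub Actions expressions evaluated from the prepare-workflow output and the triggering event. The sketch below restates that evaluation in plain Python for readers; it is an illustration only (the real evaluation happens in the Actions expression engine), and the example testrun payload is hypothetical, with only the field names taken from the hunk above.

    import json

    def resolve_test_job_inputs(testrun_output: str, event_name: str) -> dict:
        """Mimic how ci.yml derives the per-job inputs shown above.

        `testrun_output` stands in for needs.prepare-workflow.outputs.testrun
        (a JSON string); `event_name` stands in for github.event_name.
        """
        testrun = json.loads(testrun_output)  # fromJSON(...)
        return {
            "skip-code-coverage": testrun["skip_code_coverage"],
            "skip-junit-reports": event_name == "pull_request",
            "workflow-slug": "ci",
            "default-timeout": 180,  # minutes
        }

    # Example: a pull-request run whose testrun says coverage can be skipped
    print(resolve_test_job_inputs('{"skip_code_coverage": true}', "pull_request"))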
.github/workflows/nightly.yml (60 lines changed)

The schedule cron entry is updated:

@@ -18,7 +18,7 @@ on:
         description: Skip running the Salt packages test suite.
   schedule:
     # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule
     - cron: '0 1 * * *'  # Every day at 1AM
     - cron: '0 0 * * *'  # Every day at 0AM

 env:
   COLUMNS: 190

As in ci.yml, every test job gains workflow-slug and default-timeout, here with the nightly slug and a longer timeout. A representative hunk:

@@ -1924,6 +1924,8 @@ jobs:
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.13
       skip-code-coverage: false
       skip-junit-reports: false
+      workflow-slug: nightly
+      default-timeout: 360

   windows-2019:
     name: Windows 2019 Test

Equivalent hunks repeat for the same list of test jobs as in ci.yml, down to combine-all-code-coverage (Combine Code Coverage).
.github/workflows/release.yml (1 line changed)

@@ -116,7 +116,6 @@ jobs:

   download-onedir-artifact:
     name: Download Staging Onedir Artifact
     if: ${{ inputs.skip-salt-pkg-download-test-suite == false }}
     runs-on:
       - self-hosted
       - linux
.github/workflows/scheduled.yml (58 lines changed)

Same pattern as nightly.yml: every test job gains workflow-slug: scheduled and default-timeout: 360. A representative hunk:

@@ -1902,6 +1902,8 @@ jobs:
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.13
       skip-code-coverage: false
       skip-junit-reports: false
+      workflow-slug: scheduled
+      default-timeout: 360

   windows-2019:
     name: Windows 2019 Test

Equivalent hunks repeat for the same list of test jobs, down to combine-all-code-coverage (Combine Code Coverage).
.github/workflows/staging.yml (58 lines changed)

Every test job gains workflow-slug: staging and default-timeout: 180; staging runs keep skip-code-coverage: true and skip-junit-reports: true. A representative hunk:

@@ -1924,6 +1924,8 @@ jobs:
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.13
       skip-code-coverage: true
       skip-junit-reports: true
+      workflow-slug: staging
+      default-timeout: 180

   windows-2019:
     name: Windows 2019 Test

Equivalent hunks repeat for the same list of test jobs, ending just before build-src-repo (Build Repository).
Two further hunks make the same schedule and download-onedir-artifact changes in the Jinja workflow templates:

@@ -28,7 +28,7 @@ on:
         description: Skip running the Salt packages test suite.
   schedule:
     # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule
     - cron: '0 1 * * *'  # Every day at 1AM
     - cron: '0 0 * * *'  # Every day at 0AM

 <%- endblock on %>

@@ -150,7 +150,6 @@ permissions:

   download-onedir-artifact:
     name: Download Staging Onedir Artifact
     if: ${{ inputs.skip-salt-pkg-download-test-suite == false }}
     runs-on:
       - self-hosted
       - linux
.github/workflows/templates/test-salt.yml.jinja (11 lines changed)

A per-workflow timeout value is introduced at the top of the template:

@@ -1,3 +1,8 @@
+<%- if workflow_slug in ("nightly", "scheduled") %>
+<%- set timeout_value = 360 %>
+<%- else %>
+<%- set timeout_value = 180 %>
+<%- endif %>

 <%- for slug, display_name, arch in test_salt_listing["windows"] %>

Each of the three per-platform loops then passes the new values to the reusable test workflow:

@@ -21,6 +26,8 @@
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|<{ python_version }>
       skip-code-coverage: <{ skip_test_coverage_check }>
       skip-junit-reports: <{ skip_junit_reports_check }>
+      workflow-slug: <{ workflow_slug }>
+      default-timeout: <{ timeout_value }>

 <%- endfor %>

The hunks at @@ -47,6 +54,8 @@ and @@ -73,5 +82,7 @@ add the same two lines inside the other two loops.
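The effect of the new template logic is easiest to see as a small function. The sketch below simply restates the Jinja conditional in Python; the slug values are the ones used by the generated workflows above, and nothing here is part of Salt's tooling.

    def default_timeout_minutes(workflow_slug: str) -> int:
        """Mirror the Jinja block above: nightly and scheduled runs get a
        longer per-job timeout than ci and staging runs."""
        if workflow_slug in ("nightly", "scheduled"):
            return 360
        return 180

    assert default_timeout_minutes("ci") == 180
    assert default_timeout_minutes("nightly") == 360
    assert default_timeout_minutes("scheduled") == 360
    assert default_timeout_minutes("staging") == 180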
.github/workflows/test-action-macos.yml (18 lines changed)

Two new workflow inputs are added:

@@ -56,6 +56,16 @@ on:
       type: boolean
       description: Skip Publishing JUnit Reports
       default: false
+    workflow-slug:
+      required: false
+      type: string
+      description: Which workflow is running.
+      default: ci
+    default-timeout:
+      required: false
+      type: number
+      description: Timeout, in minutes, for the test job(Default 360, 6 hours).
+      default: 360

 env:
   COLUMNS: 190

The matrix step now passes the workflow slug, and the test job timeout is driven by the new input:

@@ -85,14 +95,14 @@ jobs:
       - name: Generate Test Matrix
         id: generate-matrix
         run: |
-          tools ci matrix ${{ inputs.distro-slug }}
+          tools ci matrix --workflow=${{ inputs.workflow-slug }} ${{ inputs.distro-slug }}

   test:
     name: Test
     runs-on: ${{ inputs.distro-slug }}
-    # Full test runs. Each chunk should never take more than 3 hours.
-    # Partial test runs(no chunk parallelization), 5 Hours
-    timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && 180 || 300 }}
+    # Full test runs. Each chunk should never take more than 2 hours.
+    # Partial test runs(no chunk parallelization), 6 Hours
+    timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && inputs.default-timeout || 360 }}
     needs:
       - generate-matrix
     strategy:
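The matrix step now carries the workflow slug into the tools command. A small Python sketch of the command shape, for illustration only (the real step is the shell run: line shown above, and the example slugs are just sample values):

    def matrix_command(workflow_slug: str, distro_slug: str) -> str:
        """Shape of the command the macOS matrix step now runs."""
        return f"tools ci matrix --workflow={workflow_slug} {distro_slug}"

    print(matrix_command("nightly", "macos-12"))
    # tools ci matrix --workflow=nightly macos-12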
.github/workflows/test-action.yml (18 lines changed)

The hunk at @@ -56,6 +56,16 @@ on: adds the same workflow-slug and default-timeout inputs as in test-action-macos.yml above. The matrix step and the job timeout change in the same way:

@@ -90,7 +100,7 @@ jobs:
       - name: Generate Test Matrix
         id: generate-matrix
         run: |
-          tools ci matrix ${{ fromJSON(inputs.testrun)['type'] == 'full' && '--full ' || '' }}${{ inputs.distro-slug }}
+          tools ci matrix --workflow=${{ inputs.workflow-slug }} ${{ fromJSON(inputs.testrun)['type'] == 'full' && '--full ' || '' }}${{ inputs.distro-slug }}

   test:
     name: Test

@@ -98,9 +108,9 @@ jobs:
       - self-hosted
       - linux
       - bastion
-    # Full test runs. Each chunk should never take more than 3 hours.
-    # Partial test runs(no chunk parallelization), 5 Hours
-    timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && 180 || 300 }}
+    # Full test runs. Each chunk should never take more than 2 hours.
+    # Partial test runs(no chunk parallelization), 6 Hours
+    timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && inputs.default-timeout || 360 }}
     needs:
       - generate-matrix
     strategy:
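The timeout-minutes expression uses the Actions "&& ... ||" idiom: full test runs take the caller-supplied default-timeout, anything else falls back to 360 minutes (and so would a falsy default-timeout). A rough Python restatement, illustration only; "partial" below is a placeholder for any non-full testrun type:

    def job_timeout_minutes(testrun_type: str, default_timeout: int) -> int:
        """Restate the timeout-minutes expression above: full runs use the
        caller-supplied default-timeout, other runs fall back to 360."""
        return default_timeout if testrun_type == "full" else 360

    assert job_timeout_minutes("full", 180) == 180    # ci/staging full run
    assert job_timeout_minutes("full", 360) == 360    # nightly/scheduled full run
    assert job_timeout_minutes("partial", 180) == 360 # non-full run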
changelog/65114.fixed.md (new file)

@@ -0,0 +1 @@
+Fix nonce verification, request server replies do not stomp on each other.
changelog/65231.fixed.md (new file)

@@ -0,0 +1,2 @@
+Install logrotate config as /etc/logrotate.d/salt-common for Debian packages
+Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists.
The Debian control file raises the Breaks/Replaces versions on the Salt sub-packages to 3006.4:

@@ -25,7 +25,7 @@ Description: Salt debug symbols
 Package: salt-common
 Architecture: amd64 arm64
 Depends: ${misc:Depends}
-Breaks: salt-minion (<= 3006.1)
+Breaks: salt-minion (<= 3006.4)
 Suggests: ifupdown
 Recommends: lsb-release
 Description: shared libraries that salt requires for all packages

@@ -51,8 +51,8 @@ Description: shared libraries that salt requires for all packages

 Package: salt-master
 Architecture: amd64 arm64
-Replaces: salt-common (<= 3006.1)
-Breaks: salt-common (<= 3006.1)
+Replaces: salt-common (<= 3006.4)
+Breaks: salt-common (<= 3006.4)
 Depends: salt-common (= ${source:Version}),
          ${misc:Depends}
 Description: remote manager to administer servers via salt

@@ -77,8 +77,8 @@ Description: remote manager to administer servers via salt

 Package: salt-minion
 Architecture: amd64 arm64
-Replaces: salt-common (<= 3006.1)
-Breaks: salt-common (<= 3006.1)
+Replaces: salt-common (<= 3006.4)
+Breaks: salt-common (<= 3006.4)
 Depends: bsdmainutils,
          dctrl-tools,
          salt-common (= ${source:Version}),

@@ -131,7 +131,7 @@ Description: master-of-masters for salt, the distributed remote execution system

 Package: salt-ssh
 Architecture: amd64 arm64
-Breaks: salt-common (<= 3006.3)
+Breaks: salt-common (<= 3006.4)
 Depends: salt-common (= ${source:Version}),
          openssh-client,
          ${misc:Depends}

@@ -160,7 +160,7 @@ Description: remote manager to administer servers via Salt SSH

 Package: salt-cloud
 Architecture: amd64 arm64
-Breaks: salt-common (<= 3006.3)
+Breaks: salt-common (<= 3006.4)
 Depends: salt-common (= ${source:Version}),
          ${misc:Depends}
 Description: public cloud VM management system
pkg/debian/salt-common.conffiles (new file)

@@ -0,0 +1 @@
+/etc/logrotate.d/salt-common

The salt-common directory list gains the logrotate directory:

@@ -4,3 +4,4 @@
 /usr/share/fish/vendor_completions.d
 /opt/saltstack/salt
 /etc/salt
+/etc/logrotate.d

The salt-common install list now ships the logrotate config from its new shared location:

@@ -1,4 +1,4 @@
-pkg/common/salt-common.logrotate /etc/logrotate.d/salt
+pkg/common/logrotate/salt-common /etc/logrotate.d
 pkg/common/fish-completions/salt-cp.fish /usr/share/fish/vendor_completions.d
 pkg/common/fish-completions/salt-call.fish /usr/share/fish/vendor_completions.d
 pkg/common/fish-completions/salt-syndic.fish /usr/share/fish/vendor_completions.d

A Debian maintainer script gains a cleanup step for the directory described in issue 65231:

@@ -31,5 +31,9 @@ case "$1" in
            -s $SALT_SHELL \
            -g $SALT_GROUP \
            $SALT_USER
+
+        # Remove incorrectly installed logrotate config - issue 65231
+        test -d /etc/logrotate.d/salt && rm -r /etc/logrotate.d/salt || /bin/true
+
     ;;
 esac
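For readers who prefer Python, the added maintainer-script line amounts to the following sketch; it is an illustration of the intended effect, not code shipped by the packages, and the default path is the one named in the hunk above.

    import pathlib
    import shutil

    def cleanup_stale_logrotate_dir(path: str = "/etc/logrotate.d/salt") -> None:
        """If an earlier package left /etc/logrotate.d/salt behind as a
        directory, remove it so the new /etc/logrotate.d/salt-common file is
        the only logrotate config that remains."""
        p = pathlib.Path(path)
        if p.is_dir():
            shutil.rmtree(p)

    # The shell one-liner in the hunk has the same effect and always exits 0:
    #   test -d /etc/logrotate.d/salt && rm -r /etc/logrotate.d/salt || /bin/true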
The RPM spec installs the logrotate config from the same new location:

@@ -266,7 +266,7 @@ install -p -m 0644 %{_salt_src}/pkg/common/salt-proxy@.service %{buildroot}%{_un
 # Logrotate
 #install -p %{SOURCE10} .
 mkdir -p %{buildroot}%{_sysconfdir}/logrotate.d/
-install -p -m 0644 %{_salt_src}/pkg/common/salt-common.logrotate %{buildroot}%{_sysconfdir}/logrotate.d/salt
+install -p -m 0644 %{_salt_src}/pkg/common/logrotate/salt-common %{buildroot}%{_sysconfdir}/logrotate.d/salt

 # Bash completion
 mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d/
pkg/tests/integration/test_logrotate_config.py (new file, 46 lines)

"""
Tests for logrotate config
"""

import pathlib

import packaging.version
import pytest

pytestmark = [
    pytest.mark.skip_unless_on_linux,
]


@pytest.fixture
def logrotate_config_file(grains):
    """
    Fixture for logrotate config file path
    """
    if grains["os_family"] == "RedHat":
        return pathlib.Path("/etc/logrotate.d", "salt")
    elif grains["os_family"] == "Debian":
        return pathlib.Path("/etc/logrotate.d", "salt-common")


def test_logrotate_config(logrotate_config_file):
    """
    Test that the logrotate config has been installed correctly
    """
    assert logrotate_config_file.is_file()
    assert logrotate_config_file.owner() == "root"
    assert logrotate_config_file.group() == "root"


def test_issue_65231_etc_logrotate_salt_dir_removed(install_salt):
    """
    Test that /etc/logrotate.d/salt is not a directory
    """
    if install_salt.prev_version and packaging.version.parse(
        install_salt.prev_version
    ) <= packaging.version.parse("3006.4"):
        pytest.skip("Testing a downgrade to 3006.4, do not run")

    path = pathlib.Path("/etc/logrotate.d/salt")
    if path.exists():
        assert path.is_dir() is False
The CI requirements raise the pygit2 floor and refresh the pinned version.

@@ -1,11 +1,8 @@
 # This is a compilation of requirements installed on salt-jenkins git.salt state run
-# XXX: Temporarily do not install pylxd.
-# pylxd(or likely ws4py) will cause the test suite to hang at the finish line under runtests.py
-# pylxd>=2.2.5

 --constraint=../pkg/py{py_version}/{platform}.txt

-pygit2>=1.2.0
+pygit2>=1.10.1
 yamllint
 mercurial
 hglib

@@ -2,7 +2,7 @@
 --constraint=../pkg/py{py_version}/{platform}.txt

 pyiface
-pygit2>=1.4.0
+pygit2>=1.10.1
 pymysql>=1.0.2
 ansible>=4.4.0,<5.0.1; python_version < '3.9'
 ansible>=7.0.0; python_version >= '3.9'

Eleven hunks in the compiled CI lock files (darwin, linux, and windows; the windows files for py3.8 through py3.11) apply the same one-line pin bump from pygit2==1.12.1 to pygit2==1.13.1; the surrounding pydantic, pyeapi, pynacl, pyiface, and pymssql pins shown in those hunks are unchanged context. One more source requirements file gets the same floor bump:

@@ -3,7 +3,7 @@

 dmidecode
 patch
-pygit2>=1.2.0
+pygit2>=1.10.1
 sed
 pywinrm>=0.4.1
 yamllint
|
|||
import threading
|
||||
import time
|
||||
import urllib
|
||||
import uuid
|
||||
import warnings
|
||||
|
||||
import tornado
|
||||
|
@ -36,6 +37,7 @@ import salt.utils.platform
|
|||
import salt.utils.versions
|
||||
from salt.exceptions import SaltClientError, SaltReqTimeoutError
|
||||
from salt.utils.network import ip_bracket
|
||||
from salt.utils.process import SignalHandlingProcess
|
||||
|
||||
if salt.utils.platform.is_windows():
|
||||
USE_LOAD_BALANCER = True
|
||||
|
The next hunk spans the `if USE_LOAD_BALANCER:` guard and the LoadBalancerServer definition; the class body appears on both sides of the change:

@@ -134,38 +136,47 @@ def _set_tcp_keepalive(sock, opts):
         sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)


if USE_LOAD_BALANCER:

class LoadBalancerServer(SignalHandlingProcess):
    """
    Raw TCP server which runs in its own process and will listen
    for incoming connections. Each incoming connection will be
    sent via multiprocessing queue to the workers.
    Since the queue is shared amongst workers, only one worker will
    handle a given connection.
    """

    # TODO: opts!
    # Based on default used in tornado.netutil.bind_sockets()
    backlog = 128

    def __init__(self, opts, socket_queue, **kwargs):
        super().__init__(**kwargs)
        self.opts = opts
        self.socket_queue = socket_queue
        self._socket = None

    def close(self):
        if self._socket is not None:
            self._socket.shutdown(socket.SHUT_RDWR)
            self._socket.close()
            self._socket = None

    # pylint: disable=W1701
    def __del__(self):
        self.close()

    # pylint: enable=W1701

    def run(self):
        """
        Start the load balancer
        """
        self._socket = _get_socket(self.opts)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(self._socket, self.opts)
        self._socket.setblocking(1)
        self._socket.bind(_get_bind_addr(self.opts, "ret_port"))
        self._socket.listen(self.backlog)
The corresponding hunk for LoadBalancerWorker follows the same pattern; the class body appears on both sides of the change, and the two sides differ in whether the comment refers to 'tornado.tcpserver.TCPServer' or 'salt.ext.tornado.tcpserver.TCPServer':

@@ -650,45 +661,43 @@ class SaltMessageServer(tornado.tcpserver.TCPServer):
            raise


if USE_LOAD_BALANCER:

class LoadBalancerWorker(SaltMessageServer):
    """
    This will receive TCP connections from 'LoadBalancerServer' via
    a multiprocessing queue.
    Since the queue is shared amongst workers, only one worker will handle
    a given connection.
    """

    def __init__(self, socket_queue, message_handler, *args, **kwargs):
        super().__init__(message_handler, *args, **kwargs)
        self.socket_queue = socket_queue
        self._stop = threading.Event()
        self.thread = threading.Thread(target=self.socket_queue_thread)
        self.thread.start()

    def close(self):
        self._stop.set()
        self.thread.join()
        super().close()

    def socket_queue_thread(self):
        try:
            while True:
                try:
                    client_socket, address = self.socket_queue.get(True, 1)
                except queue.Empty:
                    if self._stop.is_set():
                        break
                    continue
                # 'self.io_loop' initialized in super class
                # 'tornado.tcpserver.TCPServer'.
                # 'self._handle_connection' defined in same super class.
                self.io_loop.spawn_callback(
                    self._handle_connection, client_socket, address
                )
        except (KeyboardInterrupt, SystemExit):
            pass


class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
@ -749,10 +758,7 @@ class MessageClient:
|
|||
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
|
||||
with salt.utils.asynchronous.current_ioloop(self.io_loop):
|
||||
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
|
||||
self._mid = 1
|
||||
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
|
||||
# TODO: max queue size
|
||||
self.send_queue = [] # queue of messages to be sent
|
||||
self.send_future_map = {} # mapping of request_id -> Future
|
||||
|
||||
self._read_until_future = None
|
||||
|
@ -765,10 +771,6 @@ class MessageClient:
|
|||
|
||||
self.backoff = opts.get("tcp_reconnect_backoff", 1)
|
||||
|
||||
def _stop_io_loop(self):
|
||||
if self.io_loop is not None:
|
||||
self.io_loop.stop()
|
||||
|
||||
# TODO: timeout inflight sessions
|
||||
def close(self):
|
||||
if self._closing:
|
||||
|
@ -902,18 +904,7 @@ class MessageClient:
|
|||
self._stream_return_running = False
|
||||
|
||||
def _message_id(self):
|
||||
wrap = False
|
||||
while self._mid in self.send_future_map:
|
||||
if self._mid >= self._max_messages:
|
||||
if wrap:
|
||||
# this shouldn't ever happen, but just in case
|
||||
raise Exception("Unable to find available messageid")
|
||||
self._mid = 1
|
||||
wrap = True
|
||||
else:
|
||||
self._mid += 1
|
||||
|
||||
return self._mid
|
||||
return str(uuid.uuid4())
|
||||
|
||||
# TODO: return a message object which takes care of multiplexing?
|
||||
def on_recv(self, callback):
|
||||
|
|
|
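The `_message_id` hunk above swaps the wrapping integer counter for `uuid.uuid4()`. A minimal sketch of the idea (my illustration, not code from this diff; `send_future_map` is only a stand-in for the client's pending-request map):

import uuid

# Stand-in for MessageClient.send_future_map: request id -> pending future.
send_future_map = {}


def new_message_id() -> str:
    # Random UUIDs are unique for practical purposes, so there is no counter
    # to wrap and no need to scan the map for a free slot.
    return str(uuid.uuid4())


mid = new_message_id()
send_future_map[mid] = "pending"  # a real client would store a Future here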
@ -19,6 +19,7 @@ import tornado.ioloop
import tornado.locks
import zmq.asyncio
import zmq.error
import zmq.eventloop.future
import zmq.eventloop.zmqstream

import salt.payload

@ -633,16 +634,15 @@ class AsyncReqMessageClient:
        else:
            self.io_loop = io_loop

        self.context = zmq.Context()
        self.context = zmq.eventloop.future.Context()

        self.send_queue = []

        self._closing = False
        self._future = None
        self.lock = tornado.locks.Lock()

    def connect(self):
        if hasattr(self, "stream"):
        if hasattr(self, "socket") and self.socket:
            return
        # wire up sockets
        self._init_socket()

@ -658,24 +658,10 @@ class AsyncReqMessageClient:
            return
        else:
            self._closing = True
            if hasattr(self, "stream") and self.stream is not None:
                if ZMQ_VERSION_INFO < (14, 3, 0):
                    # stream.close() doesn't work properly on pyzmq < 14.3.0
                    if self.stream.socket:
                        self.stream.socket.close()
                    self.stream.io_loop.remove_handler(self.stream.socket)
                    # set this to None, more hacks for messed up pyzmq
                    self.stream.socket = None
                    self.socket.close()
                else:
                    self.stream.close(1)
                    self.socket = None
                self.stream = None
            if self._future:
                self._future.set_exception(SaltException("Closing connection"))
                self._future = None
            if hasattr(self, "socket") and self.socket is not None:
                self.socket.close(0)
                self.socket = None
            if self.context.closed is False:
                # This hangs if closing the stream causes an import error
                self.context.term()

    def _init_socket(self):

@ -692,23 +678,8 @@ class AsyncReqMessageClient:
            self.socket.setsockopt(zmq.IPV6, 1)
        elif hasattr(zmq, "IPV4ONLY"):
            self.socket.setsockopt(zmq.IPV4ONLY, 0)
        self.socket.linger = self.linger
        self.socket.setsockopt(zmq.LINGER, self.linger)
        self.socket.connect(self.addr)
        self.stream = zmq.eventloop.zmqstream.ZMQStream(
            self.socket, io_loop=self.io_loop
        )
        self.stream.on_recv(self.handle_reply)

    def timeout_message(self, future):
        """
        Handle a message timeout by removing it from the sending queue
        and informing the caller

        :raises: SaltReqTimeoutError
        """
        if self._future == future:
            self._future = None
            future.set_exception(SaltReqTimeoutError("Message timed out"))

    @tornado.gen.coroutine
    def send(self, message, timeout=None, callback=None):

@ -732,20 +703,27 @@ class AsyncReqMessageClient:

        if timeout is not None:
            send_timeout = self.io_loop.call_later(
                timeout, self.timeout_message, future
                timeout, self._timeout_message, future
            )

        with (yield self.lock.acquire()):
            self._future = future
            yield self.stream.send(message)
            recv = yield future
        self.io_loop.spawn_callback(self._send_recv, message, future)

        recv = yield future

        raise tornado.gen.Return(recv)

    def handle_reply(self, msg):
        data = salt.payload.loads(msg[0])
        future = self._future
        self._future = None
        future.set_result(data)

    def _timeout_message(self, future):
        if not future.done():
            future.set_exception(SaltReqTimeoutError("Message timed out"))

    @tornado.gen.coroutine
    def _send_recv(self, message, future):
        with (yield self.lock.acquire()):
            yield self.socket.send(message)
            recv = yield self.socket.recv()
        if not future.done():
            data = salt.payload.loads(recv)
            future.set_result(data)


class ZeroMQSocketMonitor:
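To make the AsyncReqMessageClient refactor above easier to follow, here is a minimal, self-contained sketch of the same pattern (my illustration, not code from the diff): a zmq.eventloop.future REQ socket driven by a Tornado coroutine, with the send/recv pair serialized by a lock and a timer that fails the caller's future if no reply arrives. The address is an assumption for the sketch only.

import tornado.concurrent
import tornado.gen
import tornado.ioloop
import tornado.locks
import zmq
import zmq.eventloop.future

ADDR = "tcp://127.0.0.1:4506"  # assumed address, illustration only

context = zmq.eventloop.future.Context()
socket = context.socket(zmq.REQ)
socket.connect(ADDR)
lock = tornado.locks.Lock()


@tornado.gen.coroutine
def _send_recv(message, future):
    # Serialize access to the REQ socket, then resolve the caller's future.
    with (yield lock.acquire()):
        yield socket.send(message)
        reply = yield socket.recv()
    if not future.done():
        future.set_result(reply)


def send(message, timeout=5):
    io_loop = tornado.ioloop.IOLoop.current()
    future = tornado.concurrent.Future()

    def _timeout():
        # Fail the pending request if the reply never arrives.
        if not future.done():
            future.set_exception(TimeoutError("Message timed out"))

    io_loop.call_later(timeout, _timeout)
    io_loop.spawn_callback(_send_recv, message, future)
    return future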
@ -19,6 +19,8 @@
    http://stackoverflow.com/questions/6190331/
"""

# pragma: no cover  # essentially using Python's OrderDict

from collections.abc import Callable
@ -162,6 +162,7 @@ def test_pub_server_channel(
        log.debug("Payload handler got %r", payload)

    req_server_channel.post_fork(handle_payload, io_loop=io_loop)

    if master_config["transport"] == "zeromq":
        p = Path(str(master_config["sock_dir"])) / "workers.ipc"
        start = time.time()
@ -0,0 +1,55 @@
import multiprocessing
import socket
import threading
import time

import pytest

import salt.transport.tcp

pytestmark = [
    pytest.mark.core_test,
]


def test_tcp_load_balancer_server(master_opts, io_loop):

    messages = []

    def handler(stream, message, header):
        messages.append(message)

    queue = multiprocessing.Queue()
    server = salt.transport.tcp.LoadBalancerServer(master_opts, queue)
    worker = salt.transport.tcp.LoadBalancerWorker(queue, handler, io_loop=io_loop)

    def run_loop():
        io_loop.start()

    loop_thread = threading.Thread(target=run_loop)
    loop_thread.start()

    thread = threading.Thread(target=server.run)
    thread.start()

    # Wait for bind to happen.
    time.sleep(0.5)

    package = {"foo": "bar"}
    payload = salt.transport.frame.frame_msg(package)
    sock = socket.socket()
    sock.connect(("127.0.0.1", master_opts["ret_port"]))
    sock.send(payload)

    try:
        start = time.monotonic()
        while not messages:
            time.sleep(0.3)
            if time.monotonic() - start > 30:
                assert False, "Took longer than 30 seconds to receive message"
        assert [package] == messages
    finally:
        server.close()
        thread.join()
        io_loop.stop()
        worker.close()
59
tests/pytests/functional/transport/tcp/test_pub_server.py
Normal file
@ -0,0 +1,59 @@
import threading
import time

import tornado.gen

import salt.transport.tcp


async def test_pub_channel(master_opts, minion_opts, io_loop):
    def presence_callback(client):
        pass

    def remove_presence_callback(client):
        pass

    master_opts["transport"] = "tcp"
    minion_opts.update(master_ip="127.0.0.1", transport="tcp")

    server = salt.transport.tcp.TCPPublishServer(master_opts)

    client = salt.transport.tcp.TCPPubClient(minion_opts, io_loop)

    payloads = []

    publishes = []

    def publish_payload(payload, callback):
        server.publish_payload(payload)
        payloads.append(payload)

    def on_recv(message):
        print("ON RECV")
        publishes.append(message)

    thread = threading.Thread(
        target=server.publish_daemon,
        args=(publish_payload, presence_callback, remove_presence_callback),
    )
    thread.start()

    # Wait for socket to bind.
    time.sleep(3)

    await client.connect(master_opts["publish_port"])
    client.on_recv(on_recv)

    print("Publish message")
    server.publish({"meh": "bah"})

    start = time.monotonic()
    try:
        while not publishes:
            await tornado.gen.sleep(0.3)
            if time.monotonic() - start > 30:
                assert False, "Message not published after 30 seconds"
    finally:
        server.io_loop.stop()
        thread.join()
        server.io_loop.close(all_fds=True)
@ -1,7 +1,10 @@
import logging
import threading
import time

import pytest

import salt.transport.zeromq
from tests.support.mock import MagicMock, patch
from tests.support.pytest.transport import PubServerChannelProcess

@ -51,3 +54,86 @@ def test_zeromq_filtering(salt_master, salt_minion):
    assert len(results) == send_num, "{} != {}, difference: {}".format(
        len(results), send_num, set(expect).difference(results)
    )


def test_pub_channel(master_opts):
    server = salt.transport.zeromq.PublishServer(master_opts)

    payloads = []

    def publish_payload(payload):
        server.publish_payload(payload)
        payloads.append(payload)

    thread = threading.Thread(target=server.publish_daemon, args=(publish_payload,))
    thread.start()

    server.publish({"meh": "bah"})

    start = time.monotonic()
    try:
        while not payloads:
            time.sleep(0.3)
            if time.monotonic() - start > 30:
                assert False, "No message received after 30 seconds"
    finally:
        server.close()
        server.io_loop.stop()
        thread.join()
        server.io_loop.close(all_fds=True)


def test_pub_channel_filtering(master_opts):
    master_opts["zmq_filtering"] = True
    server = salt.transport.zeromq.PublishServer(master_opts)

    payloads = []

    def publish_payload(payload):
        server.publish_payload(payload)
        payloads.append(payload)

    thread = threading.Thread(target=server.publish_daemon, args=(publish_payload,))
    thread.start()

    server.publish({"meh": "bah"})

    start = time.monotonic()
    try:
        while not payloads:
            time.sleep(0.3)
            if time.monotonic() - start > 30:
                assert False, "No message received after 30 seconds"
    finally:
        server.close()
        server.io_loop.stop()
        thread.join()
        server.io_loop.close(all_fds=True)


def test_pub_channel_filtering_topic(master_opts):
    master_opts["zmq_filtering"] = True
    server = salt.transport.zeromq.PublishServer(master_opts)

    payloads = []

    def publish_payload(payload):
        server.publish_payload(payload, topic_list=["meh"])
        payloads.append(payload)

    thread = threading.Thread(target=server.publish_daemon, args=(publish_payload,))
    thread.start()

    server.publish({"meh": "bah"})

    start = time.monotonic()
    try:
        while not payloads:
            time.sleep(0.3)
            if time.monotonic() - start > 30:
                assert False, "No message received after 30 seconds"
    finally:
        server.close()
        server.io_loop.stop()
        thread.join()
        server.io_loop.close(all_fds=True)
@ -5,6 +5,10 @@ import socket
import attr
import pytest
import tornado
import tornado.concurrent
import tornado.gen
import tornado.ioloop
import tornado.iostream
from pytestshellutils.utils import ports

import salt.channel.server

@ -12,7 +16,7 @@ import salt.exceptions
import salt.transport.tcp
from tests.support.mock import MagicMock, PropertyMock, patch

pytestmark = [
    pytest.mark.core_test,
]
@ -483,3 +487,185 @@ async def test_presence_removed_on_stream_closed():
    await server.publish_payload(package, None)

    server.remove_presence_callback.assert_called_with(client)


async def test_tcp_pub_client_decode_dict(minion_opts, io_loop):
    dmsg = {"meh": "bah"}
    client = salt.transport.tcp.TCPPubClient(minion_opts, io_loop)
    assert dmsg == await client._decode_messages(dmsg)


async def test_tcp_pub_client_decode_msgpack(minion_opts, io_loop):
    dmsg = {"meh": "bah"}
    msg = salt.payload.dumps(dmsg)
    client = salt.transport.tcp.TCPPubClient(minion_opts, io_loop)
    assert dmsg == await client._decode_messages(msg)


def test_tcp_pub_client_close(minion_opts, io_loop):
    client = salt.transport.tcp.TCPPubClient(minion_opts, io_loop)

    message_client = MagicMock()

    client.message_client = message_client
    client.close()
    assert client._closing is True
    assert client.message_client is None
    client.close()
    message_client.close.assert_called_once_with()


async def test_pub_server__stream_read(master_opts, io_loop):

    messages = [salt.transport.frame.frame_msg({"foo": "bar"})]

    class Stream:
        def __init__(self, messages):
            self.messages = messages

        def read_bytes(self, *args, **kwargs):
            if self.messages:
                msg = self.messages.pop(0)
                future = tornado.concurrent.Future()
                future.set_result(msg)
                return future
            raise tornado.iostream.StreamClosedError()

    client = MagicMock()
    client.stream = Stream(messages)
    client.address = "client address"
    server = salt.transport.tcp.PubServer(master_opts, io_loop)
    await server._stream_read(client)
    client.close.assert_called_once()


async def test_pub_server__stream_read_exception(master_opts, io_loop):
    client = MagicMock()
    client.stream = MagicMock()
    client.stream.read_bytes = MagicMock(
        side_effect=[
            Exception("Something went wrong"),
            tornado.iostream.StreamClosedError(),
        ]
    )
    client.address = "client address"
    server = salt.transport.tcp.PubServer(master_opts, io_loop)
    await server._stream_read(client)
    client.close.assert_called_once()


async def test_salt_message_server(master_opts):

    received = []

    def handler(stream, body, header):
        received.append(body)

    server = salt.transport.tcp.SaltMessageServer(handler)
    msg = {"foo": "bar"}
    messages = [salt.transport.frame.frame_msg(msg)]

    class Stream:
        def __init__(self, messages):
            self.messages = messages

        def read_bytes(self, *args, **kwargs):
            if self.messages:
                msg = self.messages.pop(0)
                future = tornado.concurrent.Future()
                future.set_result(msg)
                return future
            raise tornado.iostream.StreamClosedError()

    stream = Stream(messages)
    address = "client address"

    await server.handle_stream(stream, address)

    # Let loop iterate so callback gets called
    await tornado.gen.sleep(0.01)

    assert received
    assert [msg] == received


async def test_salt_message_server_exception(master_opts, io_loop):
    received = []

    def handler(stream, body, header):
        received.append(body)

    stream = MagicMock()
    stream.read_bytes = MagicMock(
        side_effect=[
            Exception("Something went wrong"),
        ]
    )
    address = "client address"
    server = salt.transport.tcp.SaltMessageServer(handler)
    await server.handle_stream(stream, address)
    stream.close.assert_called_once()


async def test_message_client_stream_return_exception(minion_opts, io_loop):
    msg = {"foo": "bar"}
    payload = salt.transport.frame.frame_msg(msg)
    future = tornado.concurrent.Future()
    future.set_result(payload)
    client = salt.transport.tcp.MessageClient(
        minion_opts,
        "127.0.0.1",
        12345,
        connect_callback=MagicMock(),
        disconnect_callback=MagicMock(),
    )
    client._stream = MagicMock()
    client._stream.read_bytes.side_effect = [
        future,
    ]
    try:
        io_loop.add_callback(client._stream_return)
        await tornado.gen.sleep(0.01)
        client.close()
        await tornado.gen.sleep(0.01)
        assert client._stream is None
    finally:
        client.close()


def test_tcp_pub_server_pre_fork(master_opts):
    process_manager = MagicMock()
    server = salt.transport.tcp.TCPPublishServer(master_opts)
    server.pre_fork(process_manager)


async def test_pub_server_publish_payload(master_opts, io_loop):
    server = salt.transport.tcp.PubServer(master_opts, io_loop=io_loop)
    package = {"foo": "bar"}
    topic_list = ["meh"]
    future = tornado.concurrent.Future()
    future.set_result(None)
    client = MagicMock()
    client.stream = MagicMock()
    client.stream.write.side_effect = [future]
    client.id_ = "meh"
    server.clients = [client]
    await server.publish_payload(package, topic_list)
    client.stream.write.assert_called_once()


async def test_pub_server_publish_payload_closed_stream(master_opts, io_loop):
    server = salt.transport.tcp.PubServer(master_opts, io_loop=io_loop)
    package = {"foo": "bar"}
    topic_list = ["meh"]
    client = MagicMock()
    client.stream = MagicMock()
    client.stream.write.side_effect = [
        tornado.iostream.StreamClosedError("mock"),
    ]
    client.id_ = "meh"
    server.clients = {client}
    await server.publish_payload(package, topic_list)
    assert server.clients == set()
@ -1,10 +1,8 @@
import msgpack
import pytest
import tornado.concurrent

import salt.config
import salt.transport.zeromq
from tests.support.mock import MagicMock


async def test_req_server_garbage_request(io_loop):

@ -13,6 +11,7 @@ async def test_req_server_garbage_request(io_loop):
    RequestServers's message handler.
    """
    opts = salt.config.master_config("")
    opts["zmq_monitor"] = True
    request_server = salt.transport.zeromq.RequestServer(opts)

    def message_handler(payload):

@ -37,20 +36,20 @@ async def test_client_timeout_msg(minion_opts):
    client = salt.transport.zeromq.AsyncReqMessageClient(
        minion_opts, "tcp://127.0.0.1:4506"
    )
    assert hasattr(client, "_future")
    assert client._future is None
    future = tornado.concurrent.Future()
    client._future = future
    client.timeout_message(future)
    with pytest.raises(salt.exceptions.SaltReqTimeoutError):
        await future
    assert client._future is None
    client.connect()
    try:
        with pytest.raises(salt.exceptions.SaltReqTimeoutError):
            await client.send({"meh": "bah"}, 1)
    finally:
        client.close()

    future_a = tornado.concurrent.Future()
    future_b = tornado.concurrent.Future()
    future_b.set_exception = MagicMock()
    client._future = future_a
    client.timeout_message(future_b)

    assert client._future == future_a
    future_b.set_exception.assert_not_called()


def test_pub_client_init(minion_opts, io_loop):
    minion_opts["id"] = "minion"
    minion_opts["__role"] = "syndic"
    minion_opts["master_ip"] = "127.0.0.1"
    minion_opts["zmq_filtering"] = True
    minion_opts["zmq_monitor"] = True
    client = salt.transport.zeromq.PublishClient(minion_opts, io_loop)
    client.send(b"asf")
    client.close()
File diff suppressed because it is too large
180
tests/pytests/unit/utils/test_listdiffer.py
Normal file
@ -0,0 +1,180 @@
import pytest

from salt.utils.dictdiffer import RecursiveDictDiffer
from salt.utils.listdiffer import list_diff


@pytest.fixture
def get_old_list():
    return [
        {"key": 1, "value": "foo1", "int_value": 101},
        {"key": 2, "value": "foo2", "int_value": 102},
        {"key": 3, "value": "foo3", "int_value": 103},
    ]


@pytest.fixture
def get_new_list():
    return [
        {"key": 1, "value": "foo1", "int_value": 101},
        {"key": 2, "value": "foo2", "int_value": 112},
        {"key": 5, "value": "foo5", "int_value": 105},
    ]


@pytest.fixture
def get_list_diff(get_old_list, get_new_list):
    return list_diff(get_old_list, get_new_list, key="key")


def test_added(get_list_diff):
    assert len(get_list_diff.added) == 1
    assert get_list_diff.added[0] == {"key": 5, "value": "foo5", "int_value": 105}


def test_removed(get_list_diff):
    assert len(get_list_diff.removed) == 1
    assert get_list_diff.removed[0] == {"key": 3, "value": "foo3", "int_value": 103}


def test_diffs(get_list_diff):
    assert len(get_list_diff.diffs) == 3
    assert get_list_diff.diffs[0] == {2: {"int_value": {"new": 112, "old": 102}}}

    # Added items
    assert get_list_diff.diffs[1] == {
        5: {
            "int_value": {"new": 105, "old": RecursiveDictDiffer.NONE_VALUE},
            "key": {"new": 5, "old": RecursiveDictDiffer.NONE_VALUE},
            "value": {"new": "foo5", "old": RecursiveDictDiffer.NONE_VALUE},
        }
    }

    # Removed items
    assert get_list_diff.diffs[2] == {
        3: {
            "int_value": {"new": RecursiveDictDiffer.NONE_VALUE, "old": 103},
            "key": {"new": RecursiveDictDiffer.NONE_VALUE, "old": 3},
            "value": {"new": RecursiveDictDiffer.NONE_VALUE, "old": "foo3"},
        }
    }


def test_new_values(get_list_diff):
    assert len(get_list_diff.new_values) == 2
    assert get_list_diff.new_values[0] == {"key": 2, "int_value": 112}
    assert get_list_diff.new_values[1] == {"key": 5, "value": "foo5", "int_value": 105}


def test_old_values(get_list_diff):
    assert len(get_list_diff.old_values) == 2
    assert get_list_diff.old_values[0] == {"key": 2, "int_value": 102}
    assert get_list_diff.old_values[1] == {"key": 3, "value": "foo3", "int_value": 103}


def test_changed_all(get_list_diff):
    assert get_list_diff.changed(selection="all") == [
        "key.2.int_value",
        "key.5.int_value",
        "key.5.value",
        "key.3.int_value",
        "key.3.value",
    ]


def test_changed_intersect(get_list_diff):
    assert get_list_diff.changed(selection="intersect") == ["key.2.int_value"]


def test_changes_str(get_list_diff):
    expected = """\tidentified by key 2:
\tint_value from 102 to 112
\tidentified by key 3:
\twill be removed
\tidentified by key 5:
\twill be added
"""
    assert get_list_diff.changes_str == expected


def test_intersect(get_list_diff):
    expected = [
        {
            "key": 1,
            "old": {"key": 1, "value": "foo1", "int_value": 101},
            "new": {"key": 1, "value": "foo1", "int_value": 101},
        },
        {
            "key": 2,
            "old": {"key": 2, "value": "foo2", "int_value": 102},
            "new": {"key": 2, "value": "foo2", "int_value": 112},
        },
    ]
    test_isect = get_list_diff.intersect
    assert test_isect == expected


def test_remove_diff_intersect(get_list_diff):
    expected = [
        {
            "key": 1,
            "old": {"key": 1, "int_value": 101},
            "new": {"key": 1, "int_value": 101},
        },
        {
            "key": 2,
            "old": {"key": 2, "int_value": 102},
            "new": {"key": 2, "int_value": 112},
        },
    ]

    get_list_diff.remove_diff(diff_key="value")
    test_isect = get_list_diff.intersect
    assert test_isect == expected


def test_remove_diff_removed(get_list_diff):
    expected = [
        {
            "key": 1,
            "old": {"key": 1, "value": "foo1", "int_value": 101},
            "new": {"key": 1, "value": "foo1", "int_value": 101},
        },
        {
            "key": 2,
            "old": {"key": 2, "value": "foo2", "int_value": 102},
            "new": {"key": 2, "value": "foo2", "int_value": 112},
        },
    ]
    get_list_diff.remove_diff(diff_key="value", diff_list="removed")
    test_isect = get_list_diff.intersect
    assert test_isect == expected


def test_changes_str2(get_list_diff):
    expected = """ key=2 (updated):
int_value from 102 to 112
key=3 (removed)
key=5 (added): {'key': 5, 'value': 'foo5', 'int_value': 105}"""
    test_changes = get_list_diff.changes_str2
    assert test_changes == expected


def test_current_list(get_list_diff):
    expected = [
        {"key": 1, "value": "foo1", "int_value": 101},
        {"key": 2, "value": "foo2", "int_value": 102},
        {"key": 3, "value": "foo3", "int_value": 103},
    ]
    test_curr_list = get_list_diff.current_list
    assert test_curr_list == expected


def test_new_list(get_list_diff):
    expected = [
        {"key": 1, "value": "foo1", "int_value": 101},
        {"key": 2, "value": "foo2", "int_value": 112},
        {"key": 5, "value": "foo5", "int_value": 105},
    ]
    test_new_list = get_list_diff.new_list
    assert test_new_list == expected
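The new tests above exercise salt.utils.listdiffer.list_diff. As a standalone illustration of the API they cover (my sketch with made-up data, not part of the added file):

from salt.utils.listdiffer import list_diff

old = [{"key": 1, "value": "a"}, {"key": 2, "value": "b"}]
new = [{"key": 1, "value": "a"}, {"key": 2, "value": "c"}]

diff = list_diff(old, new, key="key")

# A changed entry is reported with its key plus only the fields that changed,
# mirroring the new_values/old_values assertions in the tests above.
assert diff.new_values == [{"key": 2, "value": "c"}]
assert diff.old_values == [{"key": 2, "value": "b"}]
assert diff.changed(selection="intersect") == ["key.2.value"]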
File diff suppressed because it is too large
@ -1,109 +0,0 @@
from salt.utils import dictdiffer
from salt.utils.listdiffer import list_diff
from tests.support.unit import TestCase

NONE = dictdiffer.RecursiveDictDiffer.NONE_VALUE


class ListDictDifferTestCase(TestCase):
    def setUp(self):
        old_list = [
            {"key": 1, "value": "foo1", "int_value": 101},
            {"key": 2, "value": "foo2", "int_value": 102},
            {"key": 3, "value": "foo3", "int_value": 103},
        ]
        new_list = [
            {"key": 1, "value": "foo1", "int_value": 101},
            {"key": 2, "value": "foo2", "int_value": 112},
            {"key": 5, "value": "foo5", "int_value": 105},
        ]
        self.list_diff = list_diff(old_list, new_list, key="key")

    def tearDown(self):
        for attrname in ("list_diff",):
            try:
                delattr(self, attrname)
            except AttributeError:
                continue

    def test_added(self):
        self.assertEqual(len(self.list_diff.added), 1)
        self.assertDictEqual(
            self.list_diff.added[0], {"key": 5, "value": "foo5", "int_value": 105}
        )

    def test_removed(self):
        self.assertEqual(len(self.list_diff.removed), 1)
        self.assertDictEqual(
            self.list_diff.removed[0], {"key": 3, "value": "foo3", "int_value": 103}
        )

    def test_diffs(self):
        self.assertEqual(len(self.list_diff.diffs), 3)
        self.assertDictEqual(
            self.list_diff.diffs[0], {2: {"int_value": {"new": 112, "old": 102}}}
        )
        self.assertDictEqual(
            self.list_diff.diffs[1],
            # Added items
            {
                5: {
                    "int_value": {"new": 105, "old": NONE},
                    "key": {"new": 5, "old": NONE},
                    "value": {"new": "foo5", "old": NONE},
                }
            },
        )
        self.assertDictEqual(
            self.list_diff.diffs[2],
            # Removed items
            {
                3: {
                    "int_value": {"new": NONE, "old": 103},
                    "key": {"new": NONE, "old": 3},
                    "value": {"new": NONE, "old": "foo3"},
                }
            },
        )

    def test_new_values(self):
        self.assertEqual(len(self.list_diff.new_values), 2)
        self.assertDictEqual(self.list_diff.new_values[0], {"key": 2, "int_value": 112})
        self.assertDictEqual(
            self.list_diff.new_values[1], {"key": 5, "value": "foo5", "int_value": 105}
        )

    def test_old_values(self):
        self.assertEqual(len(self.list_diff.old_values), 2)
        self.assertDictEqual(self.list_diff.old_values[0], {"key": 2, "int_value": 102})
        self.assertDictEqual(
            self.list_diff.old_values[1], {"key": 3, "value": "foo3", "int_value": 103}
        )

    def test_changed_all(self):
        self.assertEqual(
            self.list_diff.changed(selection="all"),
            [
                "key.2.int_value",
                "key.5.int_value",
                "key.5.value",
                "key.3.int_value",
                "key.3.value",
            ],
        )

    def test_changed_intersect(self):
        self.assertEqual(
            self.list_diff.changed(selection="intersect"), ["key.2.int_value"]
        )

    def test_changes_str(self):
        self.assertEqual(
            self.list_diff.changes_str,
            "\tidentified by key 2:\n"
            "\tint_value from 102 to 112\n"
            "\tidentified by key 3:\n"
            "\twill be removed\n"
            "\tidentified by key 5:\n"
            "\twill be added\n",
        )
16
tools/ci.py
@ -18,6 +18,7 @@ import yaml
from ptscripts import Context, command_group

import tools.utils
import tools.utils.gh

if sys.version_info < (3, 11):
    from typing_extensions import NotRequired, TypedDict

@ -622,9 +623,12 @@ def define_testrun(ctx: Context, event_name: str, changed_files: pathlib.Path):
        "full": {
            "help": "Full test run",
        },
        "workflow": {
            "help": "Which workflow is running",
        },
    },
)
def matrix(ctx: Context, distro_slug: str, full: bool = False):
def matrix(ctx: Context, distro_slug: str, full: bool = False, workflow: str = "ci"):
    """
    Generate the test matrix.
    """

@ -635,6 +639,11 @@ def matrix(ctx: Context, distro_slug: str, full: bool = False):
        "scenarios": 1,
        "unit": 2,
    }
    # On nightly and scheduled builds we don't want splits at all
    if workflow.lower() in ("nightly", "scheduled"):
        ctx.info(f"Clearning splits definition since workflow is '{workflow}'")
        _splits.clear()

    for transport in ("zeromq", "tcp"):
        if transport == "tcp":
            if distro_slug not in (

@ -964,8 +973,9 @@ def _get_pr_test_labels_from_api(
        headers = {
            "Accept": "application/vnd.github+json",
        }
        if "GITHUB_TOKEN" in os.environ:
            headers["Authorization"] = f"Bearer {os.environ['GITHUB_TOKEN']}"
        github_token = tools.utils.gh.get_github_token(ctx)
        if github_token is not None:
            headers["Authorization"] = f"Bearer {github_token}"
        web.headers.update(headers)
        ret = web.get(f"https://api.github.com/repos/{repository}/pulls/{pr}")
        if ret.status_code != 200:
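For clarity on the new workflow parameter to matrix above: when the workflow is nightly or scheduled, the splits map is emptied so each test type runs unsplit. A small illustrative sketch of that logic (only the entries visible in the hunk are reproduced here; the real _splits mapping in tools/ci.py has more):

def effective_splits(workflow: str = "ci") -> dict:
    _splits = {
        "scenarios": 1,
        "unit": 2,
    }
    # On nightly and scheduled builds we don't want splits at all
    if workflow.lower() in ("nightly", "scheduled"):
        _splits.clear()
    return _splits


assert effective_splits("ci") == {"scenarios": 1, "unit": 2}
assert effective_splits("NIGHTLY") == {}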
@ -467,7 +467,12 @@ def github(
    with open(github_output, "a", encoding="utf-8") as wfh:
        wfh.write(f"release-messsage-file={release_message_path.resolve()}\n")

    releases = get_salt_releases(ctx, repository)
    try:
        releases = get_salt_releases(ctx, repository)
    except SystemExit:
        ctx.warn(f"Failed to get salt releases from repository '{repository}'")
        releases = get_salt_releases(ctx, "saltstack/salt")

    if Version(salt_version) >= releases[-1]:
        make_latest = True
    else:
@ -137,13 +137,19 @@ def get_salt_releases(ctx: Context, repository: str) -> list[Version]:
    """
    Return a list of salt versions
    """
    # Deferred import
    import tools.utils.gh

    ctx.info(f"Collecting salt releases from repository '{repository}'")

    versions = set()
    with ctx.web as web:
        headers = {
            "Accept": "application/vnd.github+json",
        }
        if "GITHUB_TOKEN" in os.environ:
            headers["Authorization"] = f"Bearer {os.environ['GITHUB_TOKEN']}"
        github_token = tools.utils.gh.get_github_token(ctx)
        if github_token is not None:
            headers["Authorization"] = f"Bearer {github_token}"
        web.headers.update(headers)
        ret = web.get(f"https://api.github.com/repos/{repository}/tags")
        if ret.status_code != 200:
@ -218,11 +218,20 @@ def get_github_token(ctx: Context) -> str | None:
    Get the GITHUB_TOKEN to be able to authenticate to the API.
    """
    github_token = os.environ.get("GITHUB_TOKEN")
    if github_token is None:
        gh = shutil.which("gh")
        ret = ctx.run(gh, "auth", "token", check=False, capture=True)
        if ret.returncode == 0:
            github_token = ret.stdout.decode().strip() or None
    if github_token is not None:
        ctx.info("$GITHUB_TOKEN was found on the environ")
        return github_token

    gh = shutil.which("gh")
    if gh is None:
        ctx.info("The 'gh' CLI tool is not available. Can't get a token using it.")
        return github_token

    ret = ctx.run(gh, "auth", "token", check=False, capture=True)
    if ret.returncode == 0:
        ctx.info("Got the GitHub token from the 'gh' CLI tool")
        return ret.stdout.decode().strip() or None
    ctx.info("Failed to get the GitHub token from the 'gh' CLI tool")
    return github_token
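For context, the calling-side hunks earlier (_get_pr_test_labels_from_api and get_salt_releases) now build their request headers through this helper. A minimal sketch of that usage (ctx is the ptscripts Context the tools runner passes in; the wrapper function name here is mine):

import tools.utils.gh


def build_github_headers(ctx):
    headers = {
        "Accept": "application/vnd.github+json",
    }
    github_token = tools.utils.gh.get_github_token(ctx)
    if github_token is not None:
        # Attach Authorization only when a token was found, either from
        # $GITHUB_TOKEN or from 'gh auth token'.
        headers["Authorization"] = f"Bearer {github_token}"
    return headers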