Merge branch '3006.x' into merge/3007.x/3006.x-last-merge

Commit d625eaeea8

62 changed files with 3547 additions and 4783 deletions
94 .github/actions/ssh-tunnel/README.md vendored Normal file
@@ -0,0 +1,94 @@
# SSH Tunnel

The ssh-tunnel action will create a reverse tunnel over WebRTC to port 22 on the runner.

## Usage

In order to use this action you must have an SDP offer from your local host and an SSH key pair.
Start by creating an SDP offer on your local machine. Provide these values to the ssh-tunnel
action and wait for output from the action with the SDP reply. Provide the reply to the local
rtcforward.py process by pasting it to stdin. If all goes well, the local port on your machine
will be forwarded to the SSH port on the runner.

### Getting an SDP offer

To get an SDP offer, start rtcforward.py on your local machine with the offer command.
You can also specify which port on the local machine will be used for the tunnel.

``` bash
$ python3 .github/actions/ssh-tunnel/rtcforward.py offer --port 5222
```

rtcforward.py will create an offer and display it in your terminal. (This example offer has been truncated.)
After showing the offer, the `rtcforward.py` process will wait for a reply.

```
-- offer --
eyJzZHAiOiAidj0wXHJcbm89LSAzOTQ3Mzg4NjUzIDM5NDczODg2NTMgSU4gSVA0IDAuMC4wLjBcclxu
cz0tXHJcbnQ9MCAwXHJcbmE9Z3JvdXA6QlVORExFIDBcclxuYT1tc2lkLXNlbWFudGljOldNUyAqXHJc
bm09YXBwbGljYXRpb24gMzUyNjkgRFRMUy9TQ1RQIDUwMDBcclxuYz1JTiBJUDQgMTkyLjE2OC4wLjIw
IHVkcCAxNjk0NDk4ODE1IDE4NC4xNzkuMjEwLjE1MiAzNTI2OSB0eXAgc3JmbHggcmFkZHIgMTkyLjE2
OC4wLjIwMSBycG9ydCAzNTI2OVxyXG5hPWNhbmRpZGF0ZTozZWFjMzJiZTZkY2RkMTAwZDcwMTFiNWY0
NTo4Qzo2MDoxMTpFQTo3NzpDMTo5RTo1QTo3QzpDQzowRDowODpFQzo2NDowQToxM1xyXG5hPWZpbmdl
cnByaW50OnNoYS01MTIgNjY6MzI6RUQ6MDA6N0I6QjY6NTQ6NzA6MzE6OTA6M0I6Mjg6Q0I6QTk6REU6
MzQ6QjI6NDY6NzE6NUI6MjM6ODA6Nzg6Njg6RDA6QTA6QTg6MjU6QkY6MDQ6ODY6NUY6OTA6QUY6MUQ6
QjA6QzY6ODA6QUY6OTc6QTI6MkM6NDI6QUU6MkI6Q0Q6Mjk6RUQ6MkI6ODc6NTU6ODg6NDY6QTM6ODk6
OEY6ODk6OTE6QTE6QTI6NDM6NTc6M0E6MjZcclxuYT1zZXR1cDphY3RwYXNzXHJcbiIsICJ0eXBlIjog
Im9mZmVyIn0=
-- end offer --
-- Please enter a message from remote party --
```

### Getting an SDP answer

Provide the offer to the ssh-tunnel action. When the action runs, an answer to the offer will be generated.
In the action output you will see that the offer was received, followed by the reply.

```
-- Please enter a message from remote party --
-- Message received --
-- reply --
eyJzZHAiOiAidj0wXHJcbm89LSAzOTQ3Mzg3NDcxIDM5NDczODc0NzEgSU4gSVA0IDAuMC4wLjBcclxu
cz0tXHJcbnQ9MCAwXHJcbmE9Z3JvdXA6QlVORExFIDBcclxuYT1tc2lkLXNlbWFudGljOldNUyAqXHJc
bm09YXBwbGljYXRpb24gNTcwMzkgRFRMUy9TQ1RQIDUwMDBcclxuYz1JTiBJUDQgMTkyLjE2OC42NC4x
MFxyXG5hPW1pZDowXHJcbmE9c2N0cG1hcDo1MDAwIHdlYnJ0Yy1kYXRhY2hhbm5lbCA2NTUzNVxyXG5h
MTc6MEI6RTA6OTA6QUM6RjU6RTk6RUI6Q0E6RUE6NTY6REI6NTA6QTk6REY6NTU6MzY6MkM6REI6OUE6
MDc6Mzc6QTM6NDc6NjlcclxuYT1maW5nZXJwcmludDpzaGEtNTEyIDMyOjRDOjk0OkRDOjNFOkU5OkU3
OjNCOjc5OjI4OjZDOjc5OkFEOkVDOjIzOkJDOjRBOjRBOjE5OjlCOjg5OkE3OkE2OjZBOjAwOjJFOkM5
OkE0OjlEOjAwOjM0OjFFOjRDOkVGOjcwOkY5OkNBOjg0OjlEOjcxOjI5OkVCOkIxOkREOkFEOjg5OjUx
OkZFOjhCOjI3OjFDOjFBOkJEOjUxOjQ2OjE4OjBBOjhFOjVBOjI1OjQzOjQzOjZGOkRBXHJcbmE9c2V0
dXA6YWN0aXZlXHJcbiIsICJ0eXBlIjogImFuc3dlciJ9
-- end reply --
```

## Finalizing the tunnel

Paste the SDP reply from the running action into the running `rtcforward.py` process that created the offer.
After receiving the reply you will see `-- Message received --` and the tunnel will be created.

```
-- offer --
eyJzZHAiOiAidj0wXHJcbm89LSAzOTQ3Mzg4NjUzIDM5NDczODg2NTMgSU4gSVA0IDAuMC4wLjBcclxu
cz0tXHJcbnQ9MCAwXHJcbmE9Z3JvdXA6QlVORExFIDBcclxuYT1tc2lkLXNlbWFudGljOldNUyAqXHJc
bm09YXBwbGljYXRpb24gMzUyNjkgRFRMUy9TQ1RQIDUwMDBcclxuYz1JTiBJUDQgMTkyLjE2OC4wLjIw
IHVkcCAxNjk0NDk4ODE1IDE4NC4xNzkuMjEwLjE1MiAzNTI2OSB0eXAgc3JmbHggcmFkZHIgMTkyLjE2
OC4wLjIwMSBycG9ydCAzNTI2OVxyXG5hPWNhbmRpZGF0ZTozZWFjMzJiZTZkY2RkMTAwZDcwMTFiNWY0
NTo4Qzo2MDoxMTpFQTo3NzpDMTo5RTo1QTo3QzpDQzowRDowODpFQzo2NDowQToxM1xyXG5hPWZpbmdl
cnByaW50OnNoYS01MTIgNjY6MzI6RUQ6MDA6N0I6QjY6NTQ6NzA6MzE6OTA6M0I6Mjg6Q0I6QTk6REU6
MzQ6QjI6NDY6NzE6NUI6MjM6ODA6Nzg6Njg6RDA6QTA6QTg6MjU6QkY6MDQ6ODY6NUY6OTA6QUY6MUQ6
QjA6QzY6ODA6QUY6OTc6QTI6MkM6NDI6QUU6MkI6Q0Q6Mjk6RUQ6MkI6ODc6NTU6ODg6NDY6QTM6ODk6
OEY6ODk6OTE6QTE6QTI6NDM6NTc6M0E6MjZcclxuYT1zZXR1cDphY3RwYXNzXHJcbiIsICJ0eXBlIjog
Im9mZmVyIn0=
-- end offer --
-- Please enter a message from remote party --
eyJzZHAiOiAidj0wXHJcbm89LSAzOTQ3Mzg3NDcxIDM5NDczODc0NzEgSU4gSVA0IDAuMC4wLjBcclxu
cz0tXHJcbnQ9MCAwXHJcbmE9Z3JvdXA6QlVORExFIDBcclxuYT1tc2lkLXNlbWFudGljOldNUyAqXHJc
bm09YXBwbGljYXRpb24gNTcwMzkgRFRMUy9TQ1RQIDUwMDBcclxuYz1JTiBJUDQgMTkyLjE2OC42NC4x
MFxyXG5hPW1pZDowXHJcbmE9c2N0cG1hcDo1MDAwIHdlYnJ0Yy1kYXRhY2hhbm5lbCA2NTUzNVxyXG5h
MTc6MEI6RTA6OTA6QUM6RjU6RTk6RUI6Q0E6RUE6NTY6REI6NTA6QTk6REY6NTU6MzY6MkM6REI6OUE6
MDc6Mzc6QTM6NDc6NjlcclxuYT1maW5nZXJwcmludDpzaGEtNTEyIDMyOjRDOjk0OkRDOjNFOkU5OkU3
OjNCOjc5OjI4OjZDOjc5OkFEOkVDOjIzOkJDOjRBOjRBOjE5OjlCOjg5OkE3OkE2OjZBOjAwOjJFOkM5
OkE0OjlEOjAwOjM0OjFFOjRDOkVGOjcwOkY5OkNBOjg0OjlEOjcxOjI5OkVCOkIxOkREOkFEOjg5OjUx
OkZFOjhCOjI3OjFDOjFBOkJEOjUxOjQ2OjE4OjBBOjhFOjVBOjI1OjQzOjQzOjZGOkRBXHJcbmE9c2V0
dXA6YWN0aXZlXHJcbiIsICJ0eXBlIjogImFuc3dlciJ9
-- Message received --
```
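Once the tunnel is up, the runner's sshd is reachable through the local end. A minimal sketch, assuming the offer was created with `--port 5222` and a GitHub-hosted Linux runner (whose default user is `runner`):

``` bash
# Connect through the local end of the WebRTC tunnel. 5222 is the local
# port passed to `rtcforward.py offer --port 5222`; adjust the user and
# port for your runner image.
ssh -p 5222 runner@127.0.0.1
```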
107 .github/actions/ssh-tunnel/action.yml vendored Normal file
@@ -0,0 +1,107 @@
name: ssh-tunnel
description: SSH Reverse Tunnel

inputs:
  public_key:
    required: true
    type: string
    description: Public key to accept for reverse tunnel. Warning, this should not be the public key for the 'private_key' input.
  offer:
    required: true
    type: string
    description: RTC offer
  debug:
    required: false
    type: boolean
    default: false
    description: Run sshd with debug enabled.

runs:
  using: composite
  steps:
    - uses: actions/checkout@v4

    - uses: actions/setup-python@v5
      with:
        python-version: '3.10'

    - name: Install ssh
      if: ${{ runner.os == 'Windows' }}
      shell: powershell
      run: |
        python3.exe -m pip install requests
        python3.exe .github/actions/ssh-tunnel/installssh.py

    - name: Start SSH
      shell: bash
      run: |
        if [ "$RUNNER_OS" = "Windows" ]; then
          powershell.exe -command "Start-Service sshd"
        elif [ "$RUNNER_OS" = "macOS" ]; then
          sudo launchctl load -w /System/Library/LaunchDaemons/ssh.plist
        else
          sudo systemctl start ssh
        fi

    - name: Show sshd configuration
      shell: bash
      run: |
        if [ "$RUNNER_OS" = "Linux" ]; then
          cat /etc/ssh/sshd_config
        elif [ "$RUNNER_OS" = "macOS" ]; then
          cat /private/etc/ssh/sshd_config
        else
          cat "C:\ProgramData\ssh\sshd_config"
        fi

    - name: Add ssh public key
      shell: bash
      run: |
        if [ "$RUNNER_OS" = "Linux" ]; then
          mkdir -p /home/runner/.ssh
          chmod 700 /home/runner/.ssh
          touch /home/runner/.ssh/authorized_keys
          echo "${{ inputs.public_key }}" | tee -a /home/runner/.ssh/authorized_keys
        elif [ "$RUNNER_OS" = "macOS" ]; then
          mkdir -p /Users/runner/.ssh
          chmod 700 /Users/runner/.ssh
          touch /Users/runner/.ssh/authorized_keys
          echo "${{ inputs.public_key }}" | tee -a /Users/runner/.ssh/authorized_keys
        else
          echo "${{ inputs.public_key }}" | tee -a "C:\ProgramData\ssh\administrators_authorized_keys"
        fi

    - name: Stop SSHD
      if: ${{ inputs.debug }}
      shell: bash
      run: |
        if [ "${{ inputs.debug }}" = "true" ]; then
          if [ "$RUNNER_OS" = "Windows" ]; then
            powershell.exe -command "Stop-Service sshd"
          elif [ "$RUNNER_OS" = "macOS" ]; then
            sudo launchctl unload /System/Library/LaunchDaemons/ssh.plist
          else
            sudo systemctl stop ssh
          fi
        fi

    - name: Create rtc tunnel
      shell: bash
      run: |
        if [ "${{ inputs.debug }}" = "true" ]; then
          # Debug mode: run sshd in the foreground with verbose output.
          if [ "$RUNNER_OS" = "Windows" ]; then
            ./OpenSSH-Win64/sshd.exe -d &
          elif [ "$RUNNER_OS" = "macOS" ]; then
            sudo /usr/sbin/sshd -d &
          else
            sudo mkdir -p /run/sshd
            sudo chmod 755 /run/sshd
            sudo /usr/sbin/sshd -d &
          fi
        fi
        if [ "$RUNNER_OS" = "Windows" ]; then
          python3 -m pip install aiortc
        else
          python3 -m pip install aiortc uvloop
        fi
        echo '${{ inputs.offer }}' | python .github/actions/ssh-tunnel/rtcforward.py --port 22 answer
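Because the action appends the `public_key` input verbatim to the runner's authorized keys, a dedicated throwaway key pair is the safest thing to pass. A minimal sketch (the key file name is illustrative):

``` bash
# Generate a key pair used only for tunnel sessions; pass the .pub contents
# as the action's `public_key` input and keep the private half local.
ssh-keygen -t ed25519 -f ~/.ssh/gha-tunnel -N ""
cat ~/.ssh/gha-tunnel.pub
```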
44 .github/actions/ssh-tunnel/installssh.py vendored Normal file
@@ -0,0 +1,44 @@
"""
Install and start the Win32-OpenSSH server on a Windows runner.
"""

import pathlib
import subprocess
import zipfile

import requests

fwrule = """
New-NetFirewallRule `
    -Name sshd `
    -DisplayName 'OpenSSH SSH Server' `
    -Enabled True `
    -Direction Inbound `
    -Protocol TCP `
    -Action Allow `
    -LocalPort 22 `
    -Program "{}"
"""


def start_ssh_server():
    """
    Download Win32-OpenSSH, install the sshd service, and open TCP port 22 in
    the Windows firewall.
    """
    resp = requests.get(
        "https://github.com/PowerShell/Win32-OpenSSH/releases/download/v9.8.1.0p1-Preview/OpenSSH-Win64.zip",
        allow_redirects=True,
    )
    with open("openssh.zip", "wb") as fp:
        fp.write(resp.content)
    with zipfile.ZipFile("openssh.zip") as fp:
        fp.extractall()
    install_script = pathlib.Path("./OpenSSH-Win64/install-sshd.ps1").resolve()
    print(f"{install_script}")
    subprocess.call(["powershell.exe", f"{install_script}"])
    with open("fwrule.ps1", "w") as fp:
        fp.write(fwrule.format(install_script.parent / "sshd.exe"))
    subprocess.call(["powershell.exe", "fwrule.ps1"])


if __name__ == "__main__":
    start_ssh_server()
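The action's "Install ssh" step drives this script on Windows runners; it can also be exercised by hand on a disposable Windows machine with the same two commands the step runs:

``` bash
python3.exe -m pip install requests
python3.exe .github/actions/ssh-tunnel/installssh.py
```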
361 .github/actions/ssh-tunnel/rtcforward.py vendored Normal file
@@ -0,0 +1,361 @@
import argparse
import asyncio
import base64
import json
import logging
import os
import sys
import textwrap

aiortc = None
try:
    import aiortc.exceptions
    from aiortc import RTCIceCandidate, RTCPeerConnection, RTCSessionDescription
    from aiortc.contrib.signaling import BYE
    # Used by object_from_string/object_to_string below; missing from the
    # original import list.
    from aiortc.sdp import candidate_from_sdp, candidate_to_sdp
except ImportError:
    pass

uvloop = None
try:
    import uvloop
except ImportError:
    pass

if sys.platform == "win32":
    if not aiortc:
        print("Please run 'pip install aiortc' and try again.")
        sys.exit(1)
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
else:
    if not aiortc or not uvloop:
        print("Please run 'pip install aiortc uvloop' and try again.")
        sys.exit(1)
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())


log = logging.getLogger(__name__)


def object_from_string(message_str):
    message = json.loads(message_str)
    if message["type"] in ["answer", "offer"]:
        return RTCSessionDescription(**message)
    elif message["type"] == "candidate" and message["candidate"]:
        candidate = candidate_from_sdp(message["candidate"].split(":", 1)[1])
        candidate.sdpMid = message["id"]
        candidate.sdpMLineIndex = message["label"]
        return candidate
    elif message["type"] == "bye":
        return BYE


def object_to_string(obj):
    if isinstance(obj, RTCSessionDescription):
        message = {"sdp": obj.sdp, "type": obj.type}
    elif isinstance(obj, RTCIceCandidate):
        message = {
            "candidate": "candidate:" + candidate_to_sdp(obj),
            "id": obj.sdpMid,
            "label": obj.sdpMLineIndex,
            "type": "candidate",
        }
    else:
        assert obj is BYE
        message = {"type": "bye"}
    return json.dumps(message, sort_keys=True)


def print_pastable(data, message="offer"):
    print(f"-- {message} --")
    sys.stdout.flush()
    print(f"{data}")
    sys.stdout.flush()
    print(f"-- end {message} --")
    sys.stdout.flush()


class ProxyClient:

    def __init__(self, args, channel):
        self.args = args
        self.channel = channel

    def start(self):
        self.channel.on("message")(self.on_message)

    def on_message(self, message):
        msg = json.loads(message)
        key = msg["key"]
        data = msg["data"]
        log.debug("new connection message %s", key)

        pc = RTCPeerConnection()

        @pc.on("datachannel")
        def on_channel(channel):
            log.info("Sub channel established %s", key)
            asyncio.ensure_future(self.handle_channel(channel))

        async def finalize_connection():
            obj = object_from_string(data)
            if isinstance(obj, RTCSessionDescription):
                await pc.setRemoteDescription(obj)
                if obj.type == "offer":
                    # send answer
                    await pc.setLocalDescription(await pc.createAnswer())
                    msg = {"key": key, "data": object_to_string(pc.localDescription)}
                    self.channel.send(json.dumps(msg))
            elif isinstance(obj, RTCIceCandidate):
                await pc.addIceCandidate(obj)
            elif obj is BYE:
                log.warning("Exiting")

        asyncio.ensure_future(finalize_connection())

    async def handle_channel(self, channel):
        try:
            reader, writer = await asyncio.open_connection("127.0.0.1", self.args.port)
            log.info("opened connection to port %s", self.args.port)

            @channel.on("message")
            def on_message(message):
                log.debug("rtc to socket %r", message)
                writer.write(message)
                asyncio.ensure_future(writer.drain())

            while True:
                data = await reader.read(100)
                if data:
                    log.debug("socket to rtc %r", data)
                    channel.send(data)
        except Exception:
            log.exception("Error proxying data for sub channel")


class ProxyServer:

    def __init__(self, args, channel):
        self.args = args
        self.channel = channel
        self.connections = {}

    async def start(self):
        @self.channel.on("message")
        def handle_message(message):
            asyncio.ensure_future(self.handle_message(message))

        self.server = await asyncio.start_server(
            self.new_connection, "127.0.0.1", self.args.port
        )
        log.info("Listening on port %s", self.args.port)
        async with self.server:
            await self.server.serve_forever()

    async def handle_message(self, message):
        msg = json.loads(message)
        key = msg["key"]
        pc = self.connections[key].pc
        channel = self.connections[key].channel
        obj = object_from_string(msg["data"])
        if isinstance(obj, RTCSessionDescription):
            await pc.setRemoteDescription(obj)
            if obj.type == "offer":
                # send answer
                await pc.setLocalDescription(await pc.createAnswer())
                msg = {
                    "key": key,
                    "data": object_to_string(pc.localDescription),
                }
                self.channel.send(json.dumps(msg))
        elif isinstance(obj, RTCIceCandidate):
            await pc.addIceCandidate(obj)
        elif obj is BYE:
            print("Exiting")

    async def new_connection(self, reader, writer):
        try:
            info = writer.get_extra_info("peername")
            key = f"{info[0]}:{info[1]}"
            log.info("Connection from %s", key)
            pc = RTCPeerConnection()
            channel = pc.createDataChannel(f"{key}")

            async def readerproxy():
                while True:
                    data = await reader.read(100)
                    if data:
                        log.debug("socket to rtc %r", data)
                        try:
                            channel.send(data)
                        except aiortc.exceptions.InvalidStateError:
                            log.error(
                                "Channel was in an invalid state %s, bailing reader coroutine",
                                key,
                            )
                            break

            @channel.on("open")
            def on_open():
                asyncio.ensure_future(readerproxy())

            @channel.on("message")
            def on_message(message):
                log.debug("rtc to socket %r", message)
                writer.write(message)
                asyncio.ensure_future(writer.drain())

            self.connections[key] = ProxyConnection(pc, channel)
            await pc.setLocalDescription(await pc.createOffer())
            msg = {
                "key": key,
                "data": object_to_string(pc.localDescription),
            }
            log.debug("Send new offer")
            self.channel.send(json.dumps(msg, sort_keys=True))
        except Exception:
            log.exception("Error setting up new proxied connection")


class ProxyConnection:
    def __init__(self, pc, channel):
        self.pc = pc
        self.channel = channel


async def read_from_stdin():
    loop = asyncio.get_event_loop()
    line = await loop.run_in_executor(
        None, input, "-- Please enter a message from remote party --\n"
    )
    data = line
    while line:
        try:
            line = await loop.run_in_executor(None, input)
        except EOFError:
            break
        data += line
    print("-- Message received --")
    return data


async def run_answer(pc, args):
    """
    Top level answer role: read a base64 encoded offer from stdin, reply with
    an answer, and proxy data channels to the local port.
    """

    @pc.on("datachannel")
    def on_datachannel(channel):
        log.info("Channel created")
        client = ProxyClient(args, channel)
        client.start()

    data = await read_from_stdin()
    data = base64.b64decode(data)
    obj = object_from_string(data)
    if isinstance(obj, RTCSessionDescription):
        log.debug("received rtc session description")
        await pc.setRemoteDescription(obj)
        if obj.type == "offer":
            await pc.setLocalDescription(await pc.createAnswer())
            data = object_to_string(pc.localDescription)
            data = base64.b64encode(data.encode())
            data = os.linesep.join(textwrap.wrap(data.decode(), 80))
            print_pastable(data, "reply")
    elif isinstance(obj, RTCIceCandidate):
        log.debug("received rtc ice candidate")
        await pc.addIceCandidate(obj)
    elif obj is BYE:
        print("Exiting")

    while True:
        await asyncio.sleep(0.3)


async def run_offer(pc, args):
    """
    Top level offer role: this will establish a data channel and start a tcp
    server on the port provided. New connections to the server will start the
    creation of a new rtc connection and a new data channel used for proxying
    the client's connection to the remote side.
    """
    control_channel = pc.createDataChannel("main")
    log.info("Created control channel.")

    async def start_server():
        """
        Start the proxy server. The proxy server will create a local port and
        handle creation of additional rtc peer connections for each new client
        to the proxy server port.
        """
        server = ProxyServer(args, control_channel)
        await server.start()

    @control_channel.on("open")
    def on_open():
        """
        Start the proxy server when the control channel is connected.
        """
        asyncio.ensure_future(start_server())

    await pc.setLocalDescription(await pc.createOffer())

    data = object_to_string(pc.localDescription).encode()
    data = base64.b64encode(data)
    data = os.linesep.join(textwrap.wrap(data.decode(), 80))

    print_pastable(data, "offer")

    data = await read_from_stdin()
    data = base64.b64decode(data.encode())
    obj = object_from_string(data)
    if isinstance(obj, RTCSessionDescription):
        log.debug("received rtc session description")
        await pc.setRemoteDescription(obj)
        if obj.type == "offer":
            # The remote side normally replies with an answer; if it sends an
            # offer instead, answer it in the same pastable form. (The
            # original referenced an undefined `signaling` object here.)
            await pc.setLocalDescription(await pc.createAnswer())
            data = base64.b64encode(object_to_string(pc.localDescription).encode())
            print_pastable(os.linesep.join(textwrap.wrap(data.decode(), 80)), "reply")
    elif isinstance(obj, RTCIceCandidate):
        log.debug("received rtc ice candidate")
        await pc.addIceCandidate(obj)
    elif obj is BYE:
        print("Exiting")

    while True:
        await asyncio.sleep(0.3)


if __name__ == "__main__":
    if sys.platform == "win32":
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    parser = argparse.ArgumentParser(description="Port proxy")
    parser.add_argument("role", choices=["offer", "answer"])
    parser.add_argument("--port", type=int, default=11224)
    parser.add_argument("--verbose", "-v", action="count", default=None)
    args = parser.parse_args()

    if args.verbose is None:
        logging.basicConfig(level=logging.WARNING)
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    pc = RTCPeerConnection()
    if args.role == "offer":
        coro = run_offer(pc, args)
    else:
        coro = run_answer(pc, args)

    # run event loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(coro)
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(pc.close())
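The two roles mirror the README flow: `offer` runs on your machine and serves the forwarded local port, while `answer` runs on the runner and proxies each data channel to its sshd. The invocations pair up like this, with `$OFFER` standing in for the pasted offer; the runner-side command is exactly what the action's "Create rtc tunnel" step runs:

``` bash
# Local side: create the offer and serve the tunnel on local port 5222.
python3 .github/actions/ssh-tunnel/rtcforward.py offer --port 5222

# Runner side: read the offer from stdin and forward to sshd on port 22.
echo "$OFFER" | python3 .github/actions/ssh-tunnel/rtcforward.py --port 22 answer
```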
36 .github/workflows/ci.yml vendored
@@ -41,9 +41,7 @@ jobs:
     runs-on: ubuntu-22.04
     environment: ci
     outputs:
-      jobs: ${{ steps.define-jobs.outputs.jobs }}
       changed-files: ${{ steps.process-changed-files.outputs.changed-files }}
-      testrun: ${{ steps.define-testrun.outputs.testrun }}
       salt-version: ${{ steps.setup-salt-version.outputs.salt-version }}
       cache-seed: ${{ steps.set-cache-seed.outputs.cache-seed }}
       latest-release: ${{ steps.get-salt-releases.outputs.latest-release }}
@@ -54,6 +52,8 @@ jobs:
       config: ${{ steps.workflow-config.outputs.config }}
     env:
       LINUX_ARM_RUNNER: ${{ vars.LINUX_ARM_RUNNER }}
+      FULL_TESTRUN_SLUGS: ${{ vars.FULL_TESTRUN_SLUGS }}
+      PR_TESTRUN_SLUGS: ${{ vars.PR_TESTRUN_SLUGS }}
     steps:
       - uses: actions/checkout@v4
         with:
@@ -190,11 +190,6 @@ jobs:
         run: |
           echo '${{ steps.process-changed-files.outputs.changed-files }}' | jq -C '.'

-      - name: Define Jobs To Run
-        id: define-jobs
-        run: |
-          tools ci define-jobs ${{ github.event_name }} changed-files.json
-
       - name: Get Salt Releases
         id: get-salt-releases
         env:
@@ -209,23 +204,18 @@ jobs:
         run: |
           tools ci get-testing-releases ${{ join(fromJSON(steps.get-salt-releases.outputs.releases), ' ') }} --salt-version ${{ steps.setup-salt-version.outputs.salt-version }}

-      - name: Define Testrun
-        id: define-testrun
-        run: |
-          tools ci define-testrun ${{ github.event_name }} changed-files.json
-
       - name: Define workflow config
         id: workflow-config
         run: |
           tools ci workflow-config ${{ steps.setup-salt-version.outputs.salt-version }} ${{ github.event_name }} changed-files.json

       - name: Check Contents of generated testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         run: |
           cat testrun-changed-files.txt || true

       - name: Upload testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         uses: actions/upload-artifact@v4
         with:
           name: testrun-changed-files.txt
@@ -461,7 +451,7 @@ jobs:

   build-pkgs-onedir:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -476,7 +466,7 @@ jobs:
       linux_arm_runner: ${{ fromJSON(needs.prepare-workflow.outputs.config)['linux_arm_runner'] }}
   build-ci-deps:
     name: CI Deps
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-deps-ci'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-deps-ci'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -504,7 +494,7 @@ jobs:
       nox-version: 2022.8.7
       python-version: "3.10"
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.15
-      skip-code-coverage: ${{ fromJSON(needs.prepare-workflow.outputs.testrun)['skip_code_coverage'] }}
+      skip-code-coverage: ${{ fromJSON(needs.prepare-workflow.outputs.config)['skip_code_coverage'] }}
       testing-releases: ${{ needs.prepare-workflow.outputs.testing-releases }}
       matrix: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['pkg-test-matrix']) }}
       linux_arm_runner: ${{ fromJSON(needs.prepare-workflow.outputs.config)['linux_arm_runner'] }}
@@ -519,10 +509,10 @@ jobs:
       nox-session: ci-test-onedir
       nox-version: 2022.8.7
       python-version: "3.10"
-      testrun: ${{ needs.prepare-workflow.outputs.testrun }}
+      testrun: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['testrun']) }}
       salt-version: "${{ needs.prepare-workflow.outputs.salt-version }}"
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.15
-      skip-code-coverage: ${{ fromJSON(needs.prepare-workflow.outputs.testrun)['skip_code_coverage'] }}
+      skip-code-coverage: ${{ fromJSON(needs.prepare-workflow.outputs.config)['skip_code_coverage'] }}
       workflow-slug: ci
       default-timeout: 180
       matrix: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['test-matrix']) }}
@@ -530,7 +520,7 @@ jobs:

   combine-all-code-coverage:
     name: Combine Code Coverage
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.testrun)['skip_code_coverage'] == false }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['skip_code_coverage'] == false }}
     runs-on: ubuntu-22.04
     env:
       PIP_INDEX_URL: https://pypi.org/simple
@@ -656,7 +646,6 @@ jobs:
       retention-days: 7
       if-no-files-found: error
       include-hidden-files: true
-
   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
     # on a pull request instead of requiring all
@@ -688,8 +677,3 @@ jobs:
           else
             exit 0
           fi
-
-      - name: Done
-        if: always()
-        run:
-          echo "All worflows finished"
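The separate `jobs` and `testrun` outputs are folded into the single `config` output here; downstream expressions now index into it, e.g. `fromJSON(...config)['jobs']['build-pkgs']` and `['testrun']['type']`. A quick way to sanity-check such a consolidated blob locally, with `$CONFIG_JSON` standing in for the `workflow-config` step's output:

``` bash
# jq paths mirror the fromJSON() lookups used in the workflow files.
echo "$CONFIG_JSON" | jq '.jobs."build-pkgs", .testrun.type, .skip_code_coverage'
```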
32 .github/workflows/nightly.yml vendored
@@ -98,9 +98,7 @@ jobs:
     needs:
       - workflow-requirements
     outputs:
-      jobs: ${{ steps.define-jobs.outputs.jobs }}
       changed-files: ${{ steps.process-changed-files.outputs.changed-files }}
-      testrun: ${{ steps.define-testrun.outputs.testrun }}
       salt-version: ${{ steps.setup-salt-version.outputs.salt-version }}
       cache-seed: ${{ steps.set-cache-seed.outputs.cache-seed }}
       latest-release: ${{ steps.get-salt-releases.outputs.latest-release }}
@@ -111,6 +109,8 @@ jobs:
       config: ${{ steps.workflow-config.outputs.config }}
     env:
       LINUX_ARM_RUNNER: ${{ vars.LINUX_ARM_RUNNER }}
+      FULL_TESTRUN_SLUGS: ${{ vars.FULL_TESTRUN_SLUGS }}
+      PR_TESTRUN_SLUGS: ${{ vars.PR_TESTRUN_SLUGS }}
     steps:
       - uses: actions/checkout@v4
         with:
@@ -247,11 +247,6 @@ jobs:
         run: |
           echo '${{ steps.process-changed-files.outputs.changed-files }}' | jq -C '.'

-      - name: Define Jobs To Run
-        id: define-jobs
-        run: |
-          tools ci define-jobs${{ inputs.skip-salt-test-suite && ' --skip-tests' || '' }}${{ inputs.skip-salt-pkg-test-suite && ' --skip-pkg-tests' || '' }} ${{ github.event_name }} changed-files.json
-
       - name: Get Salt Releases
         id: get-salt-releases
         env:
@@ -266,23 +261,18 @@ jobs:
         run: |
           tools ci get-testing-releases ${{ join(fromJSON(steps.get-salt-releases.outputs.releases), ' ') }} --salt-version ${{ steps.setup-salt-version.outputs.salt-version }}

-      - name: Define Testrun
-        id: define-testrun
-        run: |
-          tools ci define-testrun ${{ github.event_name }} changed-files.json
-
       - name: Define workflow config
         id: workflow-config
         run: |
           tools ci workflow-config${{ inputs.skip-salt-test-suite && ' --skip-tests' || '' }}${{ inputs.skip-salt-pkg-test-suite && ' --skip-pkg-tests' || '' }} ${{ steps.setup-salt-version.outputs.salt-version }} ${{ github.event_name }} changed-files.json

       - name: Check Contents of generated testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         run: |
           cat testrun-changed-files.txt || true

       - name: Upload testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         uses: actions/upload-artifact@v4
         with:
           name: testrun-changed-files.txt
@@ -518,7 +508,7 @@ jobs:

   build-pkgs-onedir:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -538,7 +528,7 @@ jobs:

   build-pkgs-src:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -557,7 +547,7 @@ jobs:
     secrets: inherit
   build-ci-deps:
     name: CI Deps
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-deps-ci'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-deps-ci'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -600,7 +590,7 @@ jobs:
       nox-session: ci-test-onedir
       nox-version: 2022.8.7
       python-version: "3.10"
-      testrun: ${{ needs.prepare-workflow.outputs.testrun }}
+      testrun: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['testrun']) }}
       salt-version: "${{ needs.prepare-workflow.outputs.salt-version }}"
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.15
       skip-code-coverage: true
@@ -608,7 +598,6 @@ jobs:
       default-timeout: 360
       matrix: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['test-matrix']) }}
       linux_arm_runner: ${{ fromJSON(needs.prepare-workflow.outputs.config)['linux_arm_runner'] }}
-
   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
     # on a pull request instead of requiring all
@@ -643,8 +632,3 @@ jobs:
           else
             exit 0
           fi
-
-      - name: Done
-        if: always()
-        run:
-          echo "All worflows finished"
6 .github/workflows/release.yml vendored
@@ -418,7 +418,6 @@ jobs:
           TWINE_PASSWORD: "${{ steps.get-secrets.outputs.twine-password }}"
         run: |
           tools pkg pypi-upload artifacts/release/salt-${{ needs.prepare-workflow.outputs.salt-version }}.tar.gz
-
   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
     # on a pull request instead of requiring all
@@ -454,8 +453,3 @@ jobs:
           else
             exit 0
           fi
-
-      - name: Done
-        if: always()
-        run:
-          echo "All worflows finished"
30 .github/workflows/scheduled.yml vendored
@@ -88,9 +88,7 @@ jobs:
     needs:
       - workflow-requirements
     outputs:
-      jobs: ${{ steps.define-jobs.outputs.jobs }}
       changed-files: ${{ steps.process-changed-files.outputs.changed-files }}
-      testrun: ${{ steps.define-testrun.outputs.testrun }}
       salt-version: ${{ steps.setup-salt-version.outputs.salt-version }}
       cache-seed: ${{ steps.set-cache-seed.outputs.cache-seed }}
       latest-release: ${{ steps.get-salt-releases.outputs.latest-release }}
@@ -101,6 +99,8 @@ jobs:
       config: ${{ steps.workflow-config.outputs.config }}
     env:
       LINUX_ARM_RUNNER: ${{ vars.LINUX_ARM_RUNNER }}
+      FULL_TESTRUN_SLUGS: ${{ vars.FULL_TESTRUN_SLUGS }}
+      PR_TESTRUN_SLUGS: ${{ vars.PR_TESTRUN_SLUGS }}
     steps:
       - uses: actions/checkout@v4
         with:
@@ -237,11 +237,6 @@ jobs:
         run: |
           echo '${{ steps.process-changed-files.outputs.changed-files }}' | jq -C '.'

-      - name: Define Jobs To Run
-        id: define-jobs
-        run: |
-          tools ci define-jobs ${{ github.event_name }} changed-files.json
-
       - name: Get Salt Releases
         id: get-salt-releases
         env:
@@ -256,23 +251,18 @@ jobs:
         run: |
           tools ci get-testing-releases ${{ join(fromJSON(steps.get-salt-releases.outputs.releases), ' ') }} --salt-version ${{ steps.setup-salt-version.outputs.salt-version }}

-      - name: Define Testrun
-        id: define-testrun
-        run: |
-          tools ci define-testrun ${{ github.event_name }} changed-files.json
-
       - name: Define workflow config
         id: workflow-config
         run: |
           tools ci workflow-config ${{ steps.setup-salt-version.outputs.salt-version }} ${{ github.event_name }} changed-files.json

       - name: Check Contents of generated testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         run: |
           cat testrun-changed-files.txt || true

       - name: Upload testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         uses: actions/upload-artifact@v4
         with:
           name: testrun-changed-files.txt
@@ -508,7 +498,7 @@ jobs:

   build-pkgs-onedir:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -523,7 +513,7 @@ jobs:
       linux_arm_runner: ${{ fromJSON(needs.prepare-workflow.outputs.config)['linux_arm_runner'] }}
   build-ci-deps:
     name: CI Deps
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-deps-ci'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-deps-ci'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -566,7 +556,7 @@ jobs:
       nox-session: ci-test-onedir
       nox-version: 2022.8.7
       python-version: "3.10"
-      testrun: ${{ needs.prepare-workflow.outputs.testrun }}
+      testrun: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['testrun']) }}
       salt-version: "${{ needs.prepare-workflow.outputs.salt-version }}"
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.15
       skip-code-coverage: true
@@ -574,7 +564,6 @@ jobs:
       default-timeout: 360
       matrix: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['test-matrix']) }}
       linux_arm_runner: ${{ fromJSON(needs.prepare-workflow.outputs.config)['linux_arm_runner'] }}
-
   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
     # on a pull request instead of requiring all
@@ -607,8 +596,3 @@ jobs:
           else
             exit 0
           fi
-
-      - name: Done
-        if: always()
-        run:
-          echo "All worflows finished"
40 .github/workflows/ssh-debug.yml vendored Normal file
@@ -0,0 +1,40 @@
name: SSH Debug
run-name: "SSH Debug ${{ inputs.runner }}"
on:
  workflow_dispatch:
    inputs:
      runner:
        type: string
        required: true
        description: The runner to start a tunnel on.
      offer:
        type: string
        required: true
        description: SDP Offer
      public_key:
        type: string
        required: true
        description: Your public key for ssh access.
      debug:
        required: false
        type: boolean
        default: false
        description: Run sshd with debug enabled.


jobs:
  debug:
    runs-on: ${{ inputs.runner }}
    if: ${{ inputs.runner }}
    environment: ci
    steps:
      - name: Checkout Source Code
        uses: actions/checkout@v4

      - uses: ./.github/actions/ssh-tunnel
        with:
          public_key: ${{ inputs.public_key }}
          offer: ${{ inputs.offer }}
          debug: ${{ inputs.debug }}
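The workflow is dispatch-only, so it can be started from the GitHub UI or the `gh` CLI. A minimal sketch, assuming a key pair and an offer generated as described in the action's README (file names illustrative):

``` bash
# Kick off an SSH debug session on a chosen runner label.
gh workflow run ssh-debug.yml \
  -f runner=ubuntu-22.04 \
  -f offer="$(cat offer.txt)" \
  -f public_key="$(cat ~/.ssh/gha-tunnel.pub)"
```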
32 .github/workflows/staging.yml vendored
@@ -71,9 +71,7 @@ jobs:
     needs:
       - check-requirements
     outputs:
-      jobs: ${{ steps.define-jobs.outputs.jobs }}
       changed-files: ${{ steps.process-changed-files.outputs.changed-files }}
-      testrun: ${{ steps.define-testrun.outputs.testrun }}
       salt-version: ${{ steps.setup-salt-version.outputs.salt-version }}
       cache-seed: ${{ steps.set-cache-seed.outputs.cache-seed }}
       latest-release: ${{ steps.get-salt-releases.outputs.latest-release }}
@@ -84,6 +82,8 @@ jobs:
       config: ${{ steps.workflow-config.outputs.config }}
     env:
       LINUX_ARM_RUNNER: ${{ vars.LINUX_ARM_RUNNER }}
+      FULL_TESTRUN_SLUGS: ${{ vars.FULL_TESTRUN_SLUGS }}
+      PR_TESTRUN_SLUGS: ${{ vars.PR_TESTRUN_SLUGS }}
     steps:
       - uses: actions/checkout@v4
         with:
@@ -229,11 +229,6 @@ jobs:
         run: |
           echo '${{ steps.process-changed-files.outputs.changed-files }}' | jq -C '.'

-      - name: Define Jobs To Run
-        id: define-jobs
-        run: |
-          tools ci define-jobs${{ inputs.skip-salt-test-suite && ' --skip-tests' || '' }}${{ inputs.skip-salt-pkg-test-suite && ' --skip-pkg-tests' || '' }}${{ inputs.skip-salt-pkg-download-test-suite && ' --skip-pkg-download-tests' || '' }} ${{ github.event_name }} changed-files.json
-
       - name: Get Salt Releases
         id: get-salt-releases
         env:
@@ -248,23 +243,18 @@ jobs:
         run: |
           tools ci get-testing-releases ${{ join(fromJSON(steps.get-salt-releases.outputs.releases), ' ') }} --salt-version ${{ steps.setup-salt-version.outputs.salt-version }}

-      - name: Define Testrun
-        id: define-testrun
-        run: |
-          tools ci define-testrun ${{ github.event_name }} changed-files.json
-
       - name: Define workflow config
         id: workflow-config
         run: |
           tools ci workflow-config${{ inputs.skip-salt-test-suite && ' --skip-tests' || '' }}${{ inputs.skip-salt-pkg-test-suite && ' --skip-pkg-tests' || '' }}${{ inputs.skip-salt-pkg-download-test-suite && ' --skip-pkg-download-tests' || '' }} ${{ steps.setup-salt-version.outputs.salt-version }} ${{ github.event_name }} changed-files.json

       - name: Check Contents of generated testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         run: |
           cat testrun-changed-files.txt || true

       - name: Upload testrun-changed-files.txt
-        if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+        if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
         uses: actions/upload-artifact@v4
         with:
           name: testrun-changed-files.txt
@@ -501,7 +491,7 @@ jobs:

   build-pkgs-onedir:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -521,7 +511,7 @@ jobs:

   build-pkgs-src:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -540,7 +530,7 @@ jobs:
     secrets: inherit
   build-ci-deps:
     name: CI Deps
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-deps-ci'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-deps-ci'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
@@ -583,7 +573,7 @@ jobs:
       nox-session: ci-test-onedir
       nox-version: 2022.8.7
       python-version: "3.10"
-      testrun: ${{ needs.prepare-workflow.outputs.testrun }}
+      testrun: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['testrun']) }}
       salt-version: "${{ needs.prepare-workflow.outputs.salt-version }}"
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|3.10.15
       skip-code-coverage: true
@@ -701,7 +691,6 @@ jobs:
       salt-version: "${{ needs.prepare-workflow.outputs.salt-version }}"
       matrix: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['artifact-matrix']) }}
       build-matrix: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['build-matrix']) }}
-
   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
     # on a pull request instead of requiring all
@@ -735,8 +724,3 @@ jobs:
           else
             exit 0
           fi
-
-      - name: Done
-        if: always()
-        run:
-          echo "All worflows finished"
@@ -4,7 +4,7 @@
   <%- do test_salt_linux_needs.append("build-ci-deps") %>
     name: CI Deps
   <%- if workflow_slug != 'release' %>
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-deps-ci'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-deps-ci'] }}
   <%- endif %>
     needs:
       - prepare-workflow

@@ -11,7 +11,7 @@

   <{ job_name }>:
     name: Build Packages
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['build-pkgs'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
     needs:
       - prepare-workflow
       - build-salt-onedir
2 .github/workflows/templates/ci.yml.jinja vendored

@@ -306,7 +306,7 @@

   combine-all-code-coverage:
   <%- do conclusion_needs.append("combine-all-code-coverage") %>
     name: Combine Code Coverage
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.testrun)['skip_code_coverage'] == false }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['skip_code_coverage'] == false }}
     runs-on: ubuntu-22.04
     env:
       PIP_INDEX_URL: https://pypi.org/simple
34 .github/workflows/templates/layout.yml.jinja vendored

@@ -5,7 +5,7 @@
 <%- set prepare_workflow_skip_pkg_test_suite = prepare_workflow_skip_pkg_test_suite|default("") %>
 <%- set prepare_workflow_skip_pkg_download_test_suite = prepare_workflow_skip_pkg_download_test_suite|default("") %>
 <%- set prepare_workflow_salt_version_input = prepare_workflow_salt_version_input|default("") %>
-<%- set skip_test_coverage_check = skip_test_coverage_check|default("${{ fromJSON(needs.prepare-workflow.outputs.testrun)['skip_code_coverage'] }}") %>
+<%- set skip_test_coverage_check = skip_test_coverage_check|default("${{ fromJSON(needs.prepare-workflow.outputs.config)['skip_code_coverage'] }}") %>
 <%- set gpg_key_id = "64CBBC8173D76B3F" %>
 <%- set prepare_actual_release = prepare_actual_release | default(False) %>
 <%- set gh_actions_workflows_python_version = "3.10" %>

@@ -89,9 +89,7 @@ jobs:
     <%- endfor %>
     <%- endif %>
     outputs:
-      jobs: ${{ steps.define-jobs.outputs.jobs }}
       changed-files: ${{ steps.process-changed-files.outputs.changed-files }}
-      testrun: ${{ steps.define-testrun.outputs.testrun }}
       salt-version: ${{ steps.setup-salt-version.outputs.salt-version }}
       cache-seed: ${{ steps.set-cache-seed.outputs.cache-seed }}
       latest-release: ${{ steps.get-salt-releases.outputs.latest-release }}

@@ -102,6 +100,8 @@ jobs:
       config: ${{ steps.workflow-config.outputs.config }}
     env:
       LINUX_ARM_RUNNER: ${{ vars.LINUX_ARM_RUNNER }}
+      FULL_TESTRUN_SLUGS: ${{ vars.FULL_TESTRUN_SLUGS }}
+      PR_TESTRUN_SLUGS: ${{ vars.PR_TESTRUN_SLUGS }}
     steps:
       - uses: actions/checkout@v4
         with:
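
These two hunks are the core of the workflow refactor: the separate `jobs` and `testrun` outputs are folded into the single JSON `config` output of the `workflow-config` step, which every downstream `if:` now indexes with `fromJSON`. A minimal sketch of the pattern, with a hypothetical config shape (the real document is produced by `tools ci workflow-config`):

```python
# Sketch of a consolidated workflow-config step. The keys shown are
# assumptions inferred from the expressions elsewhere in this diff; the
# real config is built by `tools ci workflow-config`.
import json
import os

config = {
    "jobs": {"build-pkgs": True, "build-deps-ci": True},
    "testrun": {"type": "changed", "skip_code_coverage": True},
}

# GitHub Actions collects step outputs from the file named by GITHUB_OUTPUT.
with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh:
    fh.write(f"config={json.dumps(config)}\n")

# Downstream jobs then gate themselves with a single expression, e.g.
#   if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['build-pkgs'] }}
```
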
@@ -252,13 +252,6 @@ jobs:
       run: |
         echo '${{ steps.process-changed-files.outputs.changed-files }}' | jq -C '.'

-    - name: Define Jobs To Run
-      id: define-jobs
-      run: |
-        tools ci define-jobs<{ prepare_workflow_skip_test_suite }><{
-          prepare_workflow_skip_pkg_test_suite }><{ prepare_workflow_skip_pkg_download_test_suite
-          }> ${{ github.event_name }} changed-files.json
-
     - name: Get Salt Releases
       id: get-salt-releases
       env:

@@ -273,11 +266,6 @@ jobs:
       run: |
         tools ci get-testing-releases ${{ join(fromJSON(steps.get-salt-releases.outputs.releases), ' ') }} --salt-version ${{ steps.setup-salt-version.outputs.salt-version }}

-    - name: Define Testrun
-      id: define-testrun
-      run: |
-        tools ci define-testrun ${{ github.event_name }} changed-files.json
-
     - name: Define workflow config
       id: workflow-config
       run: |
@@ -286,12 +274,12 @@ jobs:
          }> ${{ steps.setup-salt-version.outputs.salt-version }} ${{ github.event_name }} changed-files.json

     - name: Check Contents of generated testrun-changed-files.txt
-      if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+      if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
       run: |
         cat testrun-changed-files.txt || true

     - name: Upload testrun-changed-files.txt
-      if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['type'] != 'full' }}
+      if: ${{ fromJSON(steps.workflow-config.outputs.config)['testrun']['type'] != 'full' }}
       uses: actions/upload-artifact@v4
       with:
         name: testrun-changed-files.txt
@@ -305,18 +293,18 @@ jobs:
 {#  We can't yet use tokenless uploads with the codecov CLI

     - name: Install Codecov CLI
-      if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['skip_code_coverage'] == false }}
+      if: ${{ fromJSON(steps.define-testrun.outputs.config)['skip_code_coverage'] == false }}
       run: |
         python3 -m pip install codecov-cli

     - name: Save Commit Metadata In Codecov
-      if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['skip_code_coverage'] == false }}
+      if: ${{ fromJSON(steps.define-testrun.outputs.config)['skip_code_coverage'] == false }}
       run: |
         codecovcli --auto-load-params-from GithubActions --verbose --token ${{ secrets.CODECOV_TOKEN }} \
           create-commit --git-service github --sha ${{ github.sha }}

     - name: Create Codecov Coverage Report
-      if: ${{ fromJSON(steps.define-testrun.outputs.testrun)['skip_code_coverage'] == false }}
+      if: ${{ fromJSON(steps.define-testrun.outputs.config)['skip_code_coverage'] == false }}
       run: |
         codecovcli --auto-load-params-from GithubActions --verbose --token ${{ secrets.CODECOV_TOKEN }} \
           create-report --git-service github --sha ${{ github.sha }}
@@ -327,7 +315,6 @@ jobs:
   <%- endif %>

 <%- endblock jobs %>

   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
     # on a pull request instead of requiring all

@@ -373,8 +360,3 @@ jobs:
         else
           exit 0
         fi

-    - name: Done
-      if: always()
-      run:
-        echo "All worflows finished"
@@ -6,7 +6,7 @@
   <%- do conclusion_needs.append(job_name) %>
     name: Package Downloads
   <%- if gh_environment == "staging" %>
-    if: ${{ fromJSON(needs.prepare-workflow.outputs.jobs)['test-pkg-download'] }}
+    if: ${{ fromJSON(needs.prepare-workflow.outputs.config)['jobs']['test-pkg-download'] }}
   <%- else %>
     if: ${{ inputs.skip-salt-pkg-download-test-suite == false }}
   <%- endif %>

@@ -14,7 +14,7 @@
       nox-session: ci-test-onedir
       nox-version: <{ nox_version }>
       python-version: "<{ gh_actions_workflows_python_version }>"
-      testrun: ${{ needs.prepare-workflow.outputs.testrun }}
+      testrun: ${{ toJSON(fromJSON(needs.prepare-workflow.outputs.config)['testrun']) }}
       salt-version: "${{ needs.prepare-workflow.outputs.salt-version }}"
       cache-prefix: ${{ needs.prepare-workflow.outputs.cache-seed }}|<{ python_version }>
       skip-code-coverage: <{ skip_test_coverage_check }>
8 .github/workflows/test-action.yml vendored

@@ -71,7 +71,7 @@ jobs:

   test-linux:
     name: ${{ matrix.display_name }} ${{ matrix.tests-chunk }} ${{ matrix.transport }}${{ matrix.fips && '(fips)' || '' }}${{ matrix.test-group && ' ' || '' }}${{ matrix.test-group && matrix.test-group || '' }}
     runs-on: ${{ matrix.arch == 'x86_64' && 'ubuntu-24.04' || inputs.linux_arm_runner }}
-    if: ${{ toJSON(fromJSON(inputs.matrix)['linux']) != '[]' }}
+    if: toJSON(fromJSON(inputs.matrix)['linux-x86_64']) != '[]'
     # Full test runs. Each chunk should never take more than 2 hours.
     # Partial test runs(no chunk parallelization), 6 Hours
     timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && inputs.default-timeout || 360 }}
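
Note the guard now checks a `linux-x86_64` key rather than a single `linux` key, which implies the matrix JSON carries one list per architecture. A hypothetical illustration of that split (the real matrix is assembled by Salt's `tools ci` tooling; the entry fields here are assumptions):

```python
# Hypothetical sketch: splitting one Linux matrix into the per-architecture
# keys the workflow's `if:` guards now check.
import json

entries = [
    {"slug": "ubuntu-24.04", "arch": "x86_64"},
    {"slug": "ubuntu-24.04-arm64", "arch": "arm64"},
]

matrix = {
    "linux-x86_64": [e for e in entries if e["arch"] == "x86_64"],
    "linux-arm64": [e for e in entries if e["arch"] == "arm64"],
}

# `if: toJSON(fromJSON(inputs.matrix)['linux-x86_64']) != '[]'` is then just
# an emptiness check against the serialized list.
print(json.dumps(matrix))
```
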
@@ -387,7 +387,7 @@ jobs:

   test-linux-arm64:
     name: ${{ matrix.display_name }} ${{ matrix.tests-chunk }} ${{ matrix.transport }}${{ matrix.fips && '(fips)' || '' }}${{ matrix.test-group && ' ' || '' }}${{ matrix.test-group && matrix.test-group || '' }}
     runs-on: ${{ matrix.arch == 'x86_64' && 'ubuntu-22.04' || inputs.linux_arm_runner }}
-    if: ${{ toJSON(fromJSON(inputs.matrix)['linux']) != '[]' }}
+    if: toJSON(fromJSON(inputs.matrix)['linux-arm64']) != '[]'
     # Full test runs. Each chunk should never take more than 2 hours.
     # Partial test runs(no chunk parallelization), 6 Hours
     timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && inputs.default-timeout || 360 }}

@@ -705,7 +705,7 @@ jobs:
     runs-on: ${{ matrix.runner }}
     # Full test runs. Each chunk should never take more than 2 hours.
     # Partial test runs(no chunk parallelization), 6 Hours
-    if: ${{ toJSON(fromJSON(inputs.matrix)['macos']) != '[]' }}
+    if: toJSON(fromJSON(inputs.matrix)['macos']) != '[]'
     timeout-minutes: ${{ fromJSON(inputs.testrun)['type'] == 'full' && inputs.default-timeout || 360 }}
     strategy:
       fail-fast: false

@@ -983,7 +983,7 @@ jobs:

   test-windows:
     name: ${{ matrix.display_name }} ${{ matrix.tests-chunk }} ${{ matrix.transport }}${{ matrix.test-group && ' ' || '' }}${{ matrix.test-group && matrix.test-group || '' }}
-    if: ${{ toJSON(fromJSON(inputs.matrix)['windows']) != '[]' }}
+    if: toJSON(fromJSON(inputs.matrix)['windows']) != '[]'
     runs-on: ${{ matrix.slug }}
     # Full test runs. Each chunk should never take more than 2 hours.
     # Partial test runs(no chunk parallelization), 6 Hours
4 .github/workflows/test-packages-action.yml vendored

@@ -125,6 +125,10 @@ jobs:
         with:
           name: nox-linux-${{ matrix.arch }}-${{ inputs.nox-session }}

+      - name: "Ensure docker is running"
+        run: |
+          sudo systemctl start containerd || exit 0
+
       - name: "Pull container ${{ matrix.container }}"
         run: |
           docker pull ${{ matrix.container }}
30 .github/workflows/workflow-finished.yml vendored Normal file

@@ -0,0 +1,30 @@
+name: Workflow Finished
+run-name: Workflow Finished ${{ github.event.workflow_run.display_title }} (${{ github.event.workflow_run.conclusion }})
+
+on:
+  workflow_run:
+    workflows:
+      - CI
+      - Nightly
+      - Scheduled
+      - Stage Release
+    types:
+      - completed
+
+permissions:
+  contents: read
+  pull-requests: read
+  actions: write
+
+jobs:
+
+  restart-failed-jobs:
+    runs-on: ubuntu-latest
+    if: ${{ github.event.workflow_run.conclusion == 'failure' && github.event.workflow_run.run_attempt < 5 }}
+    steps:
+      - name: Restart failed jobs
+        env:
+          GH_REPO: ${{ github.repository }}
+          GH_TOKEN: ${{ github.token }}
+        run: |
+          gh run rerun ${{ github.event.workflow_run.id }} --failed
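
The retry job drives everything through `gh run rerun ... --failed`, capped at five attempts by the `run_attempt < 5` guard. For reference, a sketch of the equivalent REST call (using `requests`; the workflow itself only shells out to the `gh` CLI, and the run id and token here are placeholders):

```python
# Sketch: REST equivalent of `gh run rerun <run-id> --failed`, via GitHub's
# rerun-failed-jobs endpoint.
import os

import requests

repo = "saltstack/salt"
run_id = 1234567890  # placeholder: github.event.workflow_run.id in the workflow

resp = requests.post(
    f"https://api.github.com/repos/{repo}/actions/runs/{run_id}/rerun-failed-jobs",
    headers={
        "Authorization": f"Bearer {os.environ['GH_TOKEN']}",
        "Accept": "application/vnd.github+json",
    },
    timeout=30,
)
resp.raise_for_status()  # 201 Created on success
```
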
2 changelog/66992.fixed.md Normal file

@@ -0,0 +1,2 @@
+Fixes an issue with the LGPO module when trying to parse ADMX/ADML files
+that have a space in the XMLNS url in the policyDefinitionsResources header.

1 changelog/67017.fixed.md Normal file

@@ -0,0 +1 @@
+Update for deprecation of hex in pygit2 1.15.0 and above

1 changelog/67019.fixed.md Normal file

@@ -0,0 +1 @@
+Fixed blob path for salt.ufw in the firewall tutorial documentation

1 changelog/67020.fixed.md Normal file

@@ -0,0 +1 @@
+Update locations for bootstrap scripts, to new infrastructure, GitHub releases for bootstrap

1 changelog/67058.fixed.md Normal file

@@ -0,0 +1 @@
+Recognise newer AMD GPU devices

2 changelog/67122.fixed.md Normal file

@@ -0,0 +1,2 @@
+Fixed an issue with making changes to the Windows Firewall when the
+AllowInboundRules setting is set to True
@@ -4,6 +4,14 @@ relenv_version: "0.18.0"
 release_branches:
   - "3006.x"
   - "3007.x"
-mandatory_os_slugs:
-  - ubuntu-22.04
-  - ubuntu-22.04-arm64
+pr-testrun-slugs:
+  - ubuntu-24.04-pkg
+  - ubuntu-24.04
+  - rockylinux-9
+  - rockylinux-9-pkg
+  - windows-2022
+  - windows-2022-pkg
+  - macos-15
+  - macos-15-pkg
+full-testrun-slugs:
+  - all
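
Read together with the `FULL_TESTRUN_SLUGS` and `PR_TESTRUN_SLUGS` variables added to `prepare-workflow` above, these lists appear to pick the platforms a PR run exercises versus a full run, with `all` acting as a wildcard. A speculative sketch of that selection (the actual logic lives in Salt's `tools ci` code; the function below is an assumption):

```python
# Speculative sketch of slug-based testrun selection; the list names mirror
# the config keys above, but the selection function itself is illustrative.
PR_TESTRUN_SLUGS = ["ubuntu-24.04", "ubuntu-24.04-pkg", "rockylinux-9"]
FULL_TESTRUN_SLUGS = ["all"]


def select_slugs(run_type, all_slugs):
    """Return the platform slugs a testrun of the given type should cover."""
    wanted = FULL_TESTRUN_SLUGS if run_type == "full" else PR_TESTRUN_SLUGS
    if "all" in wanted:
        return list(all_slugs)
    return [slug for slug in all_slugs if slug in wanted]


print(select_slugs("pr", ["ubuntu-24.04", "rockylinux-9", "windows-2022"]))
```
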
@@ -147,8 +147,7 @@ Install or upgrade Salt
 -----------------------
 Ensure your Salt masters are running at least Salt version 3004. For instructions
 on installing or upgrading Salt, see the
-`Salt Install Guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`__.
+`Salt Install Guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_.

 .. _delta-proxy-install:
@@ -174,8 +174,8 @@ exception is raised, causing the rendering to fail with the following message:

     TemplateError: Custom Error

-Filters
-=======
+Custom Filters
+==============

 Saltstack extends `builtin filters`_ with these custom filters:
@@ -405,8 +405,9 @@ This text will be wrapped in quotes.

 .. versionadded:: 2017.7.0

-Scan through string looking for a location where this regular expression
-produces a match. Returns ``None`` in case there were no matches found
+Looks for a match for the specified regex anywhere in the string. If the string
+does not match the regex, this filter returns ``None``. If the string _does_
+match the regex, then the `capture groups`_ for the regex will be returned.

 Example:
|
||||||
|
|
||||||
("defabcdef",)
|
("defabcdef",)
|
||||||
|
|
||||||
|
If the regex you use does not contain a capture group then the number of
|
||||||
|
capture groups will be zero, and a matching regex will return an empty tuple.
|
||||||
|
This means that the following ``if`` statement would evaluate as ``False``:
|
||||||
|
|
||||||
|
.. code-block:: jinja
|
||||||
|
|
||||||
|
{%- if 'foobar' | regex_search('foo') %}
|
||||||
|
|
||||||
|
If you do not need a capture group and are just looking to test if a string
|
||||||
|
matches a regex, then you should check to see if the filter returns ``None``:
|
||||||
|
|
||||||
|
.. code-block:: jinja
|
||||||
|
|
||||||
|
{%- if (some_var | regex_search('foo')) is not none %}
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
In a Jinja statement, a null value (i.e. a Python ``None``) should be
|
||||||
|
expressed as ``none`` (i.e. lowercase). More info on this can be found in
|
||||||
|
the **Note** section here in the `jinja docs`_.
|
||||||
|
|
||||||
|
.. _`capture groups`: https://docs.python.org/3/library/re.html#re.Match.groups
|
||||||
|
.. _`jinja docs`: https://jinja.palletsprojects.com/en/stable/templates/#literals
|
||||||
|
|
||||||
.. jinja_ref:: regex_match
|
.. jinja_ref:: regex_match
|
||||||
|
|
||||||
|
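
The capture-group caveat documented above mirrors the behavior of Python's `re` module, whose match groups the filter returns; a quick standalone demonstration:

```python
import re

# Without a capture group, a successful match still yields an empty groups
# tuple -- which is falsey, the pitfall the new documentation warns about.
match = re.search("foo", "foobar")
print(match.groups())        # ()
print(bool(match.groups()))  # False

# With a capture group, the tuple is non-empty and truthy.
match = re.search("(foo)", "foobar")
print(match.groups())        # ('foo',)
```
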
@@ -428,8 +452,8 @@ Returns:

 .. versionadded:: 2017.7.0

-If zero or more characters at the beginning of string match this regular
-expression, otherwise returns ``None``.
+Works exactly like :jinja_ref:`regex_search`, but only checks for matches at
+the _beginning_ of the string passed into this filter.

 Example:
@@ -176,7 +176,7 @@ to allow traffic on ``tcp/4505`` and ``tcp/4506``:
 **Ubuntu**

 Salt installs firewall rules in :blob:`/etc/ufw/applications.d/salt.ufw
-<pkg/salt.ufw>`. Enable with:
+<pkg/common/salt.ufw>`. Enable with:

 .. code-block:: bash
@@ -176,13 +176,10 @@ $BUILD_DIR = "$SCRIPT_DIR\buildenv"
 $RELENV_DIR = "${env:LOCALAPPDATA}\relenv"
 $SYS_PY_BIN = (python -c "import sys; print(sys.executable)")
 $BLD_PY_BIN = "$BUILD_DIR\Scripts\python.exe"
-$SALT_DEP_URL = "https://repo.saltproject.io/windows/dependencies"

 if ( $Architecture -eq "x64" ) {
-    $SALT_DEP_URL = "$SALT_DEP_URL/64"
     $ARCH = "amd64"
 } else {
-    $SALT_DEP_URL = "$SALT_DEP_URL/32"
     $ARCH = "x86"
 }
@@ -249,7 +246,7 @@ if ( $env:VIRTUAL_ENV ) {
 #-------------------------------------------------------------------------------
 # Installing Relenv
 #-------------------------------------------------------------------------------
-Write-Host "Installing Relenv: " -NoNewLine
+Write-Host "Installing Relenv ($RelenvVersion): " -NoNewLine
 pip install relenv==$RelenvVersion --disable-pip-version-check | Out-Null
 $output = pip list --disable-pip-version-check
 if ("relenv" -in $output.split()) {
@@ -81,11 +81,6 @@ $ARCH = $(. $PYTHON_BIN -c "import platform; print(platform.architectur
 # Script Variables
 $PROJECT_DIR = $(git rev-parse --show-toplevel)
 $SALT_DEPS = "$PROJECT_DIR\requirements\static\pkg\py$PY_VERSION\windows.txt"
-if ( $ARCH -eq "64bit" ) {
-    $SALT_DEP_URL = "https://repo.saltproject.io/windows/dependencies/64"
-} else {
-    $SALT_DEP_URL = "https://repo.saltproject.io/windows/dependencies/32"
-}

 if ( ! $SkipInstall ) {
 #-------------------------------------------------------------------------------
@@ -1,219 +0,0 @@
-:: ############################################################################
-::
-::              FILE: sign.bat
-::
-::       DESCRIPTION: Signing and Hashing script for Salt builds on Windows.
-::                    Requires an official Code Signing Certificate and drivers
-::                    installed to sign the files. Generates hashes in MD5 and
-::                    SHA256 in a file of the same name with a `.md5` or
-::                    `.sha256` extension.
-::
-::              NOTE: This script is used internally by SaltStack to sign and
-::                    hash Windows Installer builds and uses resources not
-::                    available to the community, such as SaltStack's Code
-::                    Signing Certificate. It is placed here for version
-::                    control.
-::
-::         COPYRIGHT: (c) 2012-2018 by the SaltStack Team
-::
-::           LICENSE: Apache 2.0
-::      ORGANIZATION: SaltStack, Inc (saltstack.com)
-::           CREATED: 2017
-::
-:: ############################################################################
-::
-:: USAGE: The script must be located in a directory that has the installer
-::        files in a sub-folder named with the major version, ie: `2018.3`.
-::        Insert the key fob that contains the code signing certificate. Run
-::        the script passing the full version: `.\sign.bat 2018.3.1`.
-::
-::        The script will sign the installers and generate the corresponding
-::        hash files. These can then be uploaded to the salt repo.
-::
-::        The files must be in the following format:
-::        <Series>\Salt-Minion-<Version>-<Python Version>-<System Architecture>-Setup.exe
-::        So, for a Salt Minion installer for 2018.3.1 on AMD64 for Python 3
-::        file would be placed in a subdirectory named `2018.3` and the file
-::        would be named: `Salt-Minion-2018.3.1-Py3-AMD64-Setup.exe`. This
-::        is how the file is created by the NSI Script anyway.
-::
-::        You can test the timestamp server with the following command:
-::        curl -i timestamp.digicert.com/timestamp/health
-::
-:: REQUIREMENTS: This script requires the ``signtool.exe`` binary that is a part
-::               of the Windows SDK. To install just the ``signtool.exe``:
-::
-::               OPTION 1:
-::               1. Download the Windows 10 SDK ISO:
-::                  https://developer.microsoft.com/en-us/windows/downloads/windows-sdk/
-::               2. Mount the ISO and browse to the ``Installers`` directory
-::               3. Run the ``Windows SDK Signing Tools-x86_en-us.msi``
-::
-::               OPTION 2:
-::               1. Download the Visual Studio BUild Tools:
-::                  https://aka.ms/vs/15/release/vs_buildtools.exe
-::               2. Run the following command:
-::                  vs_buildtools.exe --quiet --add Microsoft.Component.ClickOnce.MSBuild
-::
-:: ############################################################################
-@ echo off
-if [%1]==[] (
-    echo You must pass a version
-    goto quit
-) else (
-    set "Version=%~1"
-)
-
-set Series=%Version:~0,4%
-
-if not exist .\%Series%\ (
-    echo - Series %Series% is not valid
-    exit 1
-)
-
-:: Sign Installer Files
-echo ===========================================================================
-echo Signing...
-echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-signtool.exe sign /a /t http://timestamp.digicert.com ^
-    "%Series%\Salt-Minion-%Version%-AMD64-Setup.exe" ^
-    "%Series%\Salt-Minion-%Version%-x86-Setup.exe" ^
-    "%Series%\Salt-%Version%-AMD64-Setup.exe" ^
-    "%Series%\Salt-%Version%-x86-Setup.exe" ^
-    "%Series%\Salt-%Version%-Py2-AMD64-Setup.exe" ^
-    "%Series%\Salt-%Version%-Py2-x86-Setup.exe" ^
-    "%Series%\Salt-%Version%-Py3-AMD64-Setup.exe" ^
-    "%Series%\Salt-%Version%-Py3-x86-Setup.exe" ^
-    "%Series%\Salt-Minion-%Version%-Py2-AMD64-Setup.exe" ^
-    "%Series%\Salt-Minion-%Version%-Py2-x86-Setup.exe" ^
-    "%Series%\Salt-Minion-%Version%-Py3-AMD64-Setup.exe" ^
-    "%Series%\Salt-Minion-%Version%-Py3-x86-Setup.exe" ^
-    "%Series%\Salt-Minion-%Version%-Py3-AMD64.msi" ^
-    "%Series%\Salt-Minion-%Version%-Py3-x86.msi"
-
-echo %ERRORLEVEL%
-echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-echo Signing Complete
-echo ===========================================================================
-
-:: Create Hash files
-echo ===========================================================================
-echo Creating Hashes...
-echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-set "file_name=Salt-Minion-%Version%-AMD64-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-x86-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-%Version%-AMD64-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-%Version%-x86-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-%Version%-Py2-AMD64-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-%Version%-Py2-x86-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-%Version%-Py3-AMD64-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-%Version%-Py3-x86-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-Py2-AMD64-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-Py2-x86-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-Py3-AMD64-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-Py3-x86-Setup.exe"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-Py3-AMD64.msi"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-set "file_name=Salt-Minion-%Version%-Py3-x86.msi"
-set "file=.\%Series%\%file_name%"
-if exist "%file%" (
-    echo - %file_name%
-    powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\" -NoNewLine -Encoding ASCII"
-    powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\" -NoNewLine -Encoding ASCII"
-)
-
-echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-echo Hashing Complete
-echo ===========================================================================
-
-:quit
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This legacy script pre-dates the salt-bootstrap project. In most cases, the
-# bootstrap-salt.sh script is the recommended script for installing salt onto
-# a new minion. However, that may not be appropriate for all situations. This
-# script remains to help fill those needs, and to provide an example for users
-# needing to write their own deploy scripts.
-
-rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/5/x86_64/epel-release-5-4.noarch.rpm
-yum install -y salt-minion git
-rm -rf /usr/lib/python2.6/site-packages/salt*
-rm -rf /usr/bin/salt-*
-mkdir -p /root/git
-cd /root/git
-git clone git://github.com/saltstack/salt.git
-cd salt
-python26 setup.py install
-cd
-mkdir -p /etc/salt/pki
-echo '{{ vm['priv_key'] }}' > /etc/salt/pki/minion.pem
-echo '{{ vm['pub_key'] }}' > /etc/salt/pki/minion.pub
-cat > /etc/salt/minion <<EOF
-{{minion}}
-EOF
-
-/sbin/chkconfig salt-minion on
-service salt-minion start
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-# This legacy script pre-dates the salt-bootstrap project. In most cases, the
-# bootstrap-salt.sh script is the recommended script for installing salt onto
-# a new minion. However, that may not be appropriate for all situations. This
-# script remains to help fill those needs, and to provide an example for users
-# needing to write their own deploy scripts.
-
-rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/5/x86_64/epel-release-5-4.noarch.rpm
-yum install -y salt-minion
-mkdir -p /etc/salt/pki
-echo '{{ vm['priv_key'] }}' > /etc/salt/pki/minion.pem
-echo '{{ vm['pub_key'] }}' > /etc/salt/pki/minion.pub
-cat > /etc/salt/minion <<EOF
-{{minion}}
-EOF
-
-/sbin/chkconfig salt-minion on
-service salt-minion start
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This legacy script pre-dates the salt-bootstrap project. In most cases, the
-# bootstrap-salt.sh script is the recommended script for installing salt onto
-# a new minion. However, that may not be appropriate for all situations. This
-# script remains to help fill those needs, and to provide an example for users
-# needing to write their own deploy scripts.
-
-rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/6/x86_64/epel-release-6-8.noarch.rpm
-yum -y install salt-minion git --enablerepo epel-testing
-rm -rf /usr/lib/python/site-packages/salt*
-rm -rf /usr/bin/salt-*
-mkdir -p /root/git
-cd /root/git
-git clone git://github.com/saltstack/salt.git
-cd salt
-python setup.py install
-cd
-mkdir -p /etc/salt/pki
-echo '{{ vm['priv_key'] }}' > /etc/salt/pki/minion.pem
-echo '{{ vm['pub_key'] }}' > /etc/salt/pki/minion.pub
-cat > /etc/salt/minion <<EOF
-{{minion}}
-EOF
-
-/sbin/chkconfig salt-minion on
-service salt-minion start
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-# This legacy script pre-dates the salt-bootstrap project. In most cases, the
-# bootstrap-salt.sh script is the recommended script for installing salt onto
-# a new minion. However, that may not be appropriate for all situations. This
-# script remains to help fill those needs, and to provide an example for users
-# needing to write their own deploy scripts.
-
-rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/6/x86_64/epel-release-6-8.noarch.rpm
-yum -y install salt-minion --enablerepo epel-testing
-mkdir -p /etc/salt/pki
-echo '{{ vm['priv_key'] }}' > /etc/salt/pki/minion.pem
-echo '{{ vm['pub_key'] }}' > /etc/salt/pki/minion.pub
-cat > /etc/salt/minion <<EOF
-{{minion}}
-EOF
-
-/sbin/chkconfig salt-minion on
-service salt-minion start

File diff suppressed because it is too large
@@ -7,11 +7,11 @@
 #
 # It has been designed as an example, to be customized for your own needs.

-curl -L https://bootstrap.saltstack.com | sudo sh -s -- "$@" git develop
+curl -L https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh -s -- "$@" git develop

 # By default, Salt Cloud now places the minion's keys and configuration in
 # /tmp/.saltcloud/ before executing the deploy script. After it has executed,
 # these temporary files are removed. If you don't want salt-bootstrap to handle
 # these files, comment out the above command, and uncomment the below command.

-#curl -L https://bootstrap.saltstack.com | sudo sh -s git develop
+#curl -L https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh -s git develop
@@ -7,11 +7,11 @@
 #
 # It has been designed as an example, to be customized for your own needs.

-curl -L https://bootstrap.saltstack.com | sudo sh -s -- "$@"
+curl -L https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh -s -- "$@"

 # By default, Salt Cloud now places the minion's keys and configuration in
 # /tmp/.saltcloud/ before executing the deploy script. After it has executed,
 # these temporary files are removed. If you don't want salt-bootstrap to handle
 # these files, comment out the above command, and uncomment the below command.

-#curl -L https://bootstrap.saltstack.com | sudo sh
+#curl -L https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh
@@ -7,11 +7,11 @@
 #
 # It has been designed as an example, to be customized for your own needs.

-python -c 'import urllib; print urllib.urlopen("https://bootstrap.saltstack.com").read()' | sudo sh -s -- "$@"
+python -c 'import urllib; print urllib.urlopen("https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh").read()' | sudo sh -s -- "$@"

 # By default, Salt Cloud now places the minion's keys and configuration in
 # /tmp/.saltcloud/ before executing the deploy script. After it has executed,
 # these temporary files are removed. If you don't want salt-bootstrap to handle
 # these files, comment out the above command, and uncomment the below command.

-#python -c 'import urllib; print urllib.urlopen("https://bootstrap.saltstack.com").read()' | sudo sh
+#python -c 'import urllib; print urllib.urlopen("https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh").read()' | sudo sh
@@ -7,11 +7,11 @@
 #
 # It has been designed as an example, to be customized for your own needs.

-wget --no-check-certificate -O - https://bootstrap.saltstack.com | sudo sh -s -- "$@"
+wget --no-check-certificate -O - https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh -s -- "$@"

 # By default, Salt Cloud now places the minion's keys and configuration in
 # /tmp/.saltcloud/ before executing the deploy script. After it has executed,
 # these temporary files are removed. If you don't want salt-bootstrap to handle
 # these files, comment out the above command, and uncomment the below command.

-#wget --no-check-certificate -O - https://bootstrap.saltstack.com | sudo sh
+#wget --no-check-certificate -O - https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh
@@ -7,11 +7,11 @@
 #
 # It has been designed as an example, to be customized for your own needs.

-wget -O - https://bootstrap.saltstack.com | sudo sh -s -- "$@"
+wget -O - https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh -s -- "$@"

 # By default, Salt Cloud now places the minion's keys and configuration in
 # /tmp/.saltcloud/ before executing the deploy script. After it has executed,
 # these temporary files are removed. If you don't want salt-bootstrap to handle
 # these files, comment out the above command, and uncomment the below command.

-#wget -O - https://bootstrap.saltstack.com | sudo sh
+#wget -O - https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh | sudo sh
@@ -287,7 +287,12 @@ def _linux_gpu_data():
         "matrox",
         "aspeed",
     ]
-    gpu_classes = ("vga compatible controller", "3d controller", "display controller")
+    gpu_classes = (
+        "3d controller",
+        "display controller",
+        "processing accelerators",
+        "vga compatible controller",
+    )

     devs = []
     try:
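
`processing accelerators` is the PCI class string some newer AMD compute devices report, which is what the changelog entry "Recognise newer AMD GPU devices" refers to. A simplified sketch of the class filtering (`_linux_gpu_data` actually parses full `lspci` output and also matches vendors; the record fields below are illustrative):

```python
# Simplified sketch of the class filter; the record structure is an
# assumption, not the exact shape _linux_gpu_data builds from lspci output.
gpu_classes = (
    "3d controller",
    "display controller",
    "processing accelerators",
    "vga compatible controller",
)

records = [
    {"class": "Processing accelerators", "vendor": "Advanced Micro Devices"},
    {"class": "Ethernet controller", "vendor": "Intel Corporation"},
]

gpus = [rec for rec in records if rec["class"].lower() in gpu_classes]
print(gpus)  # only the accelerator record survives the filter
```
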
278 salt/minion.py

@@ -17,6 +17,7 @@ import threading
 import time
 import traceback
 import types
+import uuid

 import tornado
 import tornado.gen
@@ -1071,8 +1072,10 @@ class MinionManager(MinionBase):

     @tornado.gen.coroutine
     def handle_event(self, package):
-        for minion in self.minions:
-            minion.handle_event(package)
+        try:
+            yield [_.handle_event(package) for _ in self.minions]
+        except Exception as exc:  # pylint: disable=broad-except
+            log.error("Error dispatching event. %s", exc)

     def _create_minion_object(
         self,
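
In Tornado's generator coroutines, yielding a list of futures waits on all of them concurrently, so one slow minion no longer serializes event delivery to the rest, and the new `try`/`except` keeps a single failing handler from killing the dispatch loop. A minimal standalone illustration of the idiom (names and sleeps are stand-ins for `Minion.handle_event()` work):

```python
# Minimal illustration of yielding a list of coroutines in Tornado.
import tornado.gen
import tornado.ioloop


@tornado.gen.coroutine
def handle_one(name):
    yield tornado.gen.sleep(0.1)  # simulate per-minion I/O
    print(f"dispatched to {name}")


@tornado.gen.coroutine
def handle_event(minions):
    # Yielding a list runs the coroutines concurrently and waits for all.
    yield [handle_one(name) for name in minions]


tornado.ioloop.IOLoop.current().run_sync(
    lambda: handle_event(["minion-a", "minion-b"])
)
```
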
@@ -1396,13 +1399,8 @@ class Minion(MinionBase):
         self.req_channel = salt.channel.client.AsyncReqChannel.factory(
             self.opts, io_loop=self.io_loop
         )
-        if hasattr(
-            self.req_channel, "connect"
-        ):  # TODO: consider generalizing this for all channels
-            log.debug("Connecting minion's long-running req channel")
-            yield self.req_channel.connect()
+        log.debug("Connecting minion's long-running req channel")
+        yield self.req_channel.connect()

         yield self._post_master_init(master)

     @tornado.gen.coroutine
@@ -1625,6 +1623,7 @@ class Minion(MinionBase):
         return functions, returners, errors, executors

     def _send_req_sync(self, load, timeout):
+        # XXX: Signing should happen in RequestChannel to be fixed in 3008
         if self.opts["minion_sign_messages"]:
             log.trace("Signing event to be published onto the bus.")
             minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
@@ -1632,18 +1631,25 @@ class Minion(MinionBase):
                 minion_privkey_path, salt.serializers.msgpack.serialize(load)
             )
             load["sig"] = sig
-        with salt.utils.event.get_event(
-            "minion", opts=self.opts, listen=False
-        ) as event:
-            return event.fire_event(
+        with salt.utils.event.get_event("minion", opts=self.opts, listen=True) as event:
+            request_id = str(uuid.uuid4())
+            log.trace("Send request to main id=%s", request_id)
+            event.fire_event(
                 load,
-                f"__master_req_channel_payload/{self.opts['master']}",
+                f"__master_req_channel_payload/{request_id}/{self.opts['master']}",
                 timeout=timeout,
             )
+            ret = event.get_event(
+                tag=f"__master_req_channel_return/{request_id}",
+                wait=timeout,
+            )
+            log.trace("Reply from main %s", request_id)
+            return ret["ret"]

     @tornado.gen.coroutine
     def _send_req_async(self, load, timeout):
+        # XXX: Signing should happen in RequestChannel to be fixed in 3008
+        # XXX: This is only used by syndic
         if self.opts["minion_sign_messages"]:
             log.trace("Signing event to be published onto the bus.")
             minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
@ -1651,31 +1657,49 @@ class Minion(MinionBase):
|
||||||
minion_privkey_path, salt.serializers.msgpack.serialize(load)
|
minion_privkey_path, salt.serializers.msgpack.serialize(load)
|
||||||
)
|
)
|
||||||
load["sig"] = sig
|
load["sig"] = sig
|
||||||
|
with salt.utils.event.get_event("minion", opts=self.opts, listen=True) as event:
|
||||||
with salt.utils.event.get_event(
|
request_id = str(uuid.uuid4())
|
||||||
"minion", opts=self.opts, listen=False
|
log.trace("Send request to main id=%s", request_id)
|
||||||
) as event:
|
yield event.fire_event_async(
|
||||||
ret = yield event.fire_event_async(
|
|
||||||
load,
|
load,
|
||||||
f"__master_req_channel_payload/{self.opts['master']}",
|
f"__master_req_channel_payload/{request_id}/{self.opts['master']}",
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
raise tornado.gen.Return(ret)
|
start = time.time()
|
||||||
|
while time.time() - start < timeout:
|
||||||
|
ret = event.get_event(
|
||||||
|
tag=f"__master_req_channel_return/{request_id}", no_block=True
|
||||||
|
)
|
||||||
|
if ret:
|
||||||
|
break
|
||||||
|
yield tornado.gen.sleep(0.3)
|
||||||
|
else:
|
||||||
|
raise TimeoutError("Did not recieve return event")
|
||||||
|
log.trace("Reply from main %s", request_id)
|
||||||
|
raise tornado.gen.Return(ret["ret"])
|
||||||
|
|
||||||
def _fire_master(
|
@tornado.gen.coroutine
|
||||||
self,
|
def _send_req_async_main(self, load, timeout):
|
||||||
data=None,
|
"""
|
||||||
tag=None,
|
Send a request to the master's request server. To be called from the
|
||||||
events=None,
|
top level process in the main thread only. Worker threads and
|
||||||
pretag=None,
|
processess should call _send_req_sync or _send_req_async as nessecery.
|
||||||
timeout=60,
|
"""
|
||||||
sync=True,
|
if self.opts["minion_sign_messages"]:
|
||||||
timeout_handler=None,
|
log.trace("Signing event to be published onto the bus.")
|
||||||
include_startup_grains=False,
|
minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
|
||||||
|
sig = salt.crypt.sign_message(
|
||||||
|
minion_privkey_path, salt.serializers.msgpack.serialize(load)
|
||||||
|
)
|
||||||
|
load["sig"] = sig
|
||||||
|
ret = yield self.req_channel.send(
|
||||||
|
load, timeout=timeout, tries=self.opts["return_retry_tries"]
|
||||||
|
)
|
||||||
|
raise tornado.gen.Return(ret)
|
||||||
|
|
||||||
|
def _fire_master_prepare(
|
||||||
|
self, data, tag, events, pretag, include_startup_grains=False
|
||||||
):
|
):
|
||||||
"""
|
|
||||||
Fire an event on the master, or drop message if unable to send.
|
|
||||||
"""
|
|
||||||
load = {
|
load = {
|
||||||
"id": self.opts["id"],
|
"id": self.opts["id"],
|
||||||
"cmd": "_minion_event",
|
"cmd": "_minion_event",
|
||||||
|
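The hunks above replace a blind fire-and-return call with a correlated request/reply over the minion's local event bus: each request is tagged with a fresh UUID, and the main process fires the reply back under a matching `__master_req_channel_return/<id>` tag. A minimal self-contained sketch of that correlation pattern follows; the dict stands in for `salt.utils.event`, and all names are illustrative, not Salt's API.

```python
import uuid

_bus = {}  # toy stand-in for the minion event bus


def fire_event(data, tag):
    _bus[tag] = data


def get_event(tag):
    return _bus.get(tag)


def send_req(load, master="127.0.0.1"):
    # Tag the request with a unique id so the reply can be matched to it
    # even when several requests are in flight at once.
    request_id = str(uuid.uuid4())
    fire_event(load, f"__master_req_channel_payload/{request_id}/{master}")
    # ... the main io loop would forward the load to the master, then reply:
    fire_event({"ret": "pong"}, f"__master_req_channel_return/{request_id}")
    return get_event(f"__master_req_channel_return/{request_id}")["ret"]


print(send_req({"cmd": "ping"}))  # -> pong
```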
@@ -1700,34 +1724,62 @@ class Minion(MinionBase):
                 if k in self.opts["start_event_grains"]
             }
             load["grains"] = grains_to_add
+        return load
 
-        if sync:
-            try:
-                self._send_req_sync(load, timeout)
-            except salt.exceptions.SaltReqTimeoutError:
-                log.info(
-                    "fire_master failed: master could not be contacted. Request timed"
-                    " out."
-                )
-                return False
-            except Exception:  # pylint: disable=broad-except
-                log.info("fire_master failed: %s", traceback.format_exc())
-                return False
-        else:
-            if timeout_handler is None:
-
-                def handle_timeout(*_):
-                    log.info(
-                        "fire_master failed: master could not be contacted. Request"
-                        " timed out."
-                    )
-                    return True
-
-                timeout_handler = handle_timeout
-
-            # pylint: disable=unexpected-keyword-arg
-            self._send_req_async(load, timeout)
-            # pylint: enable=unexpected-keyword-arg
-        return True
+    @tornado.gen.coroutine
+    def _fire_master_main(
+        self,
+        data=None,
+        tag=None,
+        events=None,
+        pretag=None,
+        timeout=60,
+        timeout_handler=None,
+        include_startup_grains=False,
+    ):
+        load = self._fire_master_prepare(
+            data, tag, events, pretag, include_startup_grains
+        )
+        if timeout_handler is None:
+
+            def handle_timeout(*_):
+                log.info(
+                    "fire_master failed: master could not be contacted. Request"
+                    " timed out."
+                )
+                return True
+
+            timeout_handler = handle_timeout
+
+        yield self._send_req_async_main(load, timeout)
+
+    def _fire_master(
+        self,
+        data=None,
+        tag=None,
+        events=None,
+        pretag=None,
+        timeout=60,
+        timeout_handler=None,
+        include_startup_grains=False,
+    ):
+        """
+        Fire an event on the master, or drop message if unable to send.
+        """
+        load = self._fire_master_prepare(
+            data, tag, events, pretag, include_startup_grains
+        )
+        try:
+            self._send_req_sync(load, timeout)
+        except salt.exceptions.SaltReqTimeoutError:
+            log.info(
+                "fire_master failed: master could not be contacted. Request timed"
+                " out."
+            )
+            return False
+        except Exception:  # pylint: disable=broad-except
+            log.info("fire_master failed: %s", traceback.format_exc())
+            return False
         return True
 
     async def _handle_decoded_payload(self, data):
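The refactor above splits the old dual-mode `_fire_master` into `_fire_master_prepare`, a coroutine `_fire_master_main`, and a sync-only `_fire_master`. Code running on the io loop must now `yield` the `*_main` coroutine. A minimal tornado sketch of that calling convention; the stub coroutine is illustrative, not Salt's method.

```python
import tornado.gen
import tornado.ioloop


@tornado.gen.coroutine
def _fire_master_main(data, tag):
    # stub standing in for the coroutine added above
    raise tornado.gen.Return((data, tag))


@tornado.gen.coroutine
def caller():
    # event-loop code yields the coroutine instead of calling it directly
    result = yield _fire_master_main("ping", "minion_ping")
    raise tornado.gen.Return(result)


print(tornado.ioloop.IOLoop.current().run_sync(caller))  # ('ping', 'minion_ping')
```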
@@ -2228,10 +2280,7 @@ class Minion(MinionBase):
         except Exception as exc:  # pylint: disable=broad-except
             log.error("The return failed for job %s: %s", data["jid"], exc)
 
-    def _return_pub(self, ret, ret_cmd="_return", timeout=60, sync=True):
-        """
-        Return the data from the executed command to the master server
-        """
+    def _prepare_return_pub(self, ret, ret_cmd="_return"):
         jid = ret.get("jid", ret.get("__jid__"))
         fun = ret.get("fun", ret.get("__fun__"))
         if self.opts["multiprocessing"]:
@@ -2285,7 +2334,12 @@ class Minion(MinionBase):
         if ret["jid"] == "req":
             ret["jid"] = salt.utils.jid.gen_jid(self.opts)
         salt.utils.minion.cache_jobs(self.opts, ret["jid"], ret)
+        return load
+
+    @tornado.gen.coroutine
+    def _return_pub_main(self, ret, ret_cmd="_return", timeout=60):
+        jid = ret.get("jid", ret.get("__jid__"))
+        load = self._prepare_return_pub(ret, ret_cmd)
         if not self.opts["pub_ret"]:
             return ""
@@ -2299,20 +2353,38 @@ class Minion(MinionBase):
             )
             return True
 
-        if sync:
-            try:
-                ret_val = self._send_req_sync(load, timeout=timeout)
-            except SaltReqTimeoutError:
-                timeout_handler()
-                return ""
-        else:
-            # pylint: disable=unexpected-keyword-arg
-            ret_val = self._send_req_async(
-                load,
-                timeout=timeout,
-            )
-            # pylint: enable=unexpected-keyword-arg
+        try:
+            ret_val = yield self._send_req_async_main(load, timeout=timeout)
+        except SaltReqTimeoutError:
+            timeout_handler()
+            ret_val = ""
+        log.trace("ret_val = %s", ret_val)  # pylint: disable=no-member
+        raise tornado.gen.Return(ret_val)
+
+    def _return_pub(self, ret, ret_cmd="_return", timeout=60):
+        """
+        Return the data from the executed command to the master server
+        """
+        jid = ret.get("jid", ret.get("__jid__"))
+        load = self._prepare_return_pub(ret, ret_cmd)
+        if not self.opts["pub_ret"]:
+            return ""
+
+        def timeout_handler(*_):
+            log.warning(
+                "The minion failed to return the job information for job %s. "
+                "This is often due to the master being shut down or "
+                "overloaded. If the master is running, consider increasing "
+                "the worker_threads value.",
+                jid,
+            )
+            return True
+
+        try:
+            ret_val = self._send_req_sync(load, timeout=timeout)
+        except SaltReqTimeoutError:
+            timeout_handler()
+            return ""
         log.trace("ret_val = %s", ret_val)  # pylint: disable=no-member
         return ret_val
@@ -2320,6 +2392,9 @@ class Minion(MinionBase):
         """
         Return the data from the executed command to the master server
         """
+        # XXX: This is only used by syndic and should be moved to the Syndic class.
+        # XXX: The sync flag is only called with sync=False. Which also means
+        # deprecating sync means we can remove Minion._send_req_async.
         if not isinstance(rets, list):
             rets = [rets]
         jids = {}
@@ -2460,13 +2535,13 @@ class Minion(MinionBase):
             # Send an event to the master that the minion is live
             if self.opts["enable_legacy_startup_events"]:
                 # Old style event. Defaults to False in 3001 release.
-                self._fire_master(
+                self._fire_master_main(
                     "Minion {} started at {}".format(self.opts["id"], time.asctime()),
                     "minion_start",
                     include_startup_grains=include_grains,
                 )
             # send name spaced event
-            self._fire_master(
+            self._fire_master_main(
                 "Minion {} started at {}".format(self.opts["id"], time.asctime()),
                 tagify([self.opts["id"], "start"], "minion"),
                 include_startup_grains=include_grains,
@@ -2750,21 +2825,35 @@ class Minion(MinionBase):
                     notify=data.get("notify", False),
                 )
             elif tag.startswith("__master_req_channel_payload"):
-                job_master = tag.rsplit("/", 1)[1]
+                request_id, job_master = tag.rsplit("/", 2)[1:]
                 if job_master == self.opts["master"]:
+                    ret = None
                     try:
-                        yield _minion.req_channel.send(
+                        ret = yield _minion.req_channel.send(
                             data,
                             timeout=_minion._return_retry_timer(),
                             tries=_minion.opts["return_retry_tries"],
                         )
                     except salt.exceptions.SaltReqTimeoutError:
-                        log.error("Timeout encountered while sending %r request", data)
+                        log.error(
+                            "Timeout encountered while sending %r request. id=%s",
+                            data,
+                            request_id,
+                        )
+                        raise tornado.gen.Return()
+                    with salt.utils.event.get_event(
+                        "minion", opts=self.opts, listen=False
+                    ) as event:
+                        yield event.fire_event_async(
+                            {"ret": ret},
+                            f"__master_req_channel_return/{request_id}",
+                        )
                 else:
                     log.debug(
-                        "Skipping req for other master: cmd=%s master=%s",
+                        "Skipping req for other master: cmd=%s master=%s id=%s",
                         data["cmd"],
                         job_master,
+                        request_id,
                     )
             elif tag.startswith("pillar_refresh"):
                 yield _minion.pillar_refresh(
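With the payload tag now carrying three segments, the handler recovers both trailing values with a single `rsplit`. A quick check of that parsing; the id and master values below are made up for illustration.

```python
tag = "__master_req_channel_payload/8d9f0c3a/127.0.0.1"
# rsplit from the right twice, then drop the fixed prefix
request_id, job_master = tag.rsplit("/", 2)[1:]
assert request_id == "8d9f0c3a"
assert job_master == "127.0.0.1"
```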
@@ -2792,13 +2881,22 @@ class Minion(MinionBase):
                 self._mine_send(tag, data)
             elif tag.startswith("fire_master"):
                 if self.connected:
-                    log.debug("Forwarding master event tag=%s", data["tag"])
-                    self._fire_master(
+                    log.debug(
+                        "Forwarding event %s to master %s",
+                        data["tag"],
+                        self.opts["master"],
+                    )
+                    yield self._fire_master_main(
                         data["data"],
                         data["tag"],
                         data["events"],
                         data["pretag"],
-                        sync=False,
+                    )
+                else:
+                    log.debug(
+                        "Master %s is not connected, dropping event %s",
+                        self.opts["master"],
+                        data["tag"],
                     )
             elif tag.startswith(master_event(type="disconnected")) or tag.startswith(
                 master_event(type="failback")
@@ -2866,6 +2964,7 @@ class Minion(MinionBase):
             self.req_channel = salt.channel.client.AsyncReqChannel.factory(
                 self.opts, io_loop=self.io_loop
             )
+            yield self.req_channel.connect()
 
             # put the current schedule into the new loaders
             self.opts["schedule"] = self.schedule.option("schedule")
@@ -2955,11 +3054,11 @@ class Minion(MinionBase):
                     1
                 ],
             )
-            self._return_pub(data, ret_cmd="_return", sync=False)
+            yield self._return_pub_main(data, ret_cmd="_return")
         elif tag.startswith("_salt_error"):
             if self.connected:
                 log.debug("Forwarding salt error event tag=%s", tag)
-                self._fire_master(data, tag, sync=False)
+                yield self._fire_master_main(data, tag)
         elif tag.startswith("salt/auth/creds"):
             key = tuple(data["key"])
             log.debug(
@@ -2972,7 +3071,7 @@ class Minion(MinionBase):
         elif tag.startswith("__beacons_return"):
             if self.connected:
                 log.debug("Firing beacons to master")
-                self._fire_master(events=data["beacons"])
+                yield self._fire_master_main(events=data["beacons"])
 
     def cleanup_subprocesses(self):
         """
@@ -3170,10 +3269,9 @@ class Minion(MinionBase):
                         "minion is running under an init system."
                     )
 
-                self._fire_master(
+                self._fire_master_main(
                     "ping",
                     "minion_ping",
-                    sync=False,
                     timeout_handler=ping_timeout_handler,
                 )
             except Exception:  # pylint: disable=broad-except
@@ -3374,12 +3472,10 @@ class Syndic(Minion):
         self._fire_master(
             "Syndic {} started at {}".format(self.opts["id"], time.asctime()),
             "syndic_start",
-            sync=False,
         )
         self._fire_master(
             "Syndic {} started at {}".format(self.opts["id"], time.asctime()),
             tagify([self.opts["id"], "start"], "syndic"),
-            sync=False,
         )
 
     # TODO: clean up docs
@@ -3774,7 +3870,7 @@ class SyndicManager(MinionBase):
                 "events": events,
                 "pretag": tagify(self.opts["id"], base="syndic"),
                 "timeout": self._return_retry_timer(),
-                "sync": False,
+                "sync": True,  # Sync needs to be true unless being called from a coroutine
             },
         )
         if self.delayed:
@@ -5061,6 +5061,18 @@ def _remove_invalid_xmlns(xml_file):
     return xml_tree
 
 
+def _encode_xmlns_url(match):
+    """
+    Escape spaces in xmlns urls
+    """
+    before_xmlns = match.group(1)
+    xmlns = match.group(2)
+    url = match.group(3)
+    after_url = match.group(4)
+    encoded_url = re.sub(r"\s+", "%20", url)
+    return f'{before_xmlns}{xmlns}="{encoded_url}"{after_url}'
+
+
 def _parse_xml(adm_file):
     """
     Parse the admx/adml file. There are 3 scenarios (so far) that we'll likely
@@ -5107,6 +5119,12 @@ def _parse_xml(adm_file):
             encoding = "utf-16"
         raw = raw.decode(encoding)
         for line in raw.split("\r\n"):
+            if 'xmlns="' in line:
+                line = re.sub(
+                    r'(.*)(\bxmlns(?::\w+)?)\s*=\s*"([^"]+)"(.*)',
+                    _encode_xmlns_url,
+                    line,
+                )
             if 'key="' in line:
                 start = line.index('key="')
                 q1 = line[start:].index('"') + start
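Taken together, the two hunks above percent-encode stray spaces inside `xmlns` URLs before the ADMX/ADML file is parsed. A runnable sketch of that substitution; the sample line is invented.

```python
import re


def _encode_xmlns_url(match):
    # reassemble the attribute with spaces in the URL percent-encoded
    before_xmlns, xmlns, url, after_url = match.groups()
    encoded_url = re.sub(r"\s+", "%20", url)
    return f'{before_xmlns}{xmlns}="{encoded_url}"{after_url}'


line = '<policyDefinitions xmlns="http://schemas/base /policy">'
fixed = re.sub(
    r'(.*)(\bxmlns(?::\w+)?)\s*=\s*"([^"]+)"(.*)', _encode_xmlns_url, line
)
print(fixed)  # <policyDefinitions xmlns="http://schemas/base%20/policy">
```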
@@ -5744,8 +5762,9 @@ def _set_netsh_value(profile, section, option, value):
         salt.utils.win_lgpo_netsh.set_logging_settings(
             profile=profile, setting=option, value=value, store="lgpo"
         )
-    log.trace("LGPO: Clearing netsh data for %s profile", profile)
-    __context__["lgpo.netsh_data"].pop(profile)
+    if profile in __context__["lgpo.netsh_data"]:
+        log.trace("LGPO: Clearing netsh data for %s profile", profile)
+        __context__["lgpo.netsh_data"].pop(profile, {})
     return True
|
@ -623,7 +623,7 @@ def versions():
|
||||||
|
|
||||||
def bootstrap(
|
def bootstrap(
|
||||||
version="develop",
|
version="develop",
|
||||||
script="https://bootstrap.saltproject.io",
|
script="https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh",
|
||||||
hosts="",
|
hosts="",
|
||||||
script_args="",
|
script_args="",
|
||||||
roster="flat",
|
roster="flat",
|
||||||
|
@ -639,7 +639,7 @@ def bootstrap(
|
||||||
version : develop
|
version : develop
|
||||||
Git tag of version to install
|
Git tag of version to install
|
||||||
|
|
||||||
script : https://bootstrap.saltproject.io/
|
script : https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh
|
||||||
URL containing the script to execute
|
URL containing the script to execute
|
||||||
|
|
||||||
hosts
|
hosts
|
||||||
|
@ -699,8 +699,8 @@ def bootstrap(
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt-run manage.bootstrap hosts='host1,host2'
|
salt-run manage.bootstrap hosts='host1,host2'
|
||||||
salt-run manage.bootstrap hosts='host1,host2' version='v3004.2'
|
salt-run manage.bootstrap hosts='host1,host2' version='v3006.2'
|
||||||
salt-run manage.bootstrap hosts='host1,host2' version='v3004.2' script='https://bootstrap.saltproject.io/develop'
|
salt-run manage.bootstrap hosts='host1,host2' version='v3006.2' script='https://github.com/saltstack/salt-bootstrap/develop'
|
||||||
"""
|
"""
|
||||||
|
|
||||||
client_opts = __opts__.copy()
|
client_opts = __opts__.copy()
|
||||||
|
|
|
@ -317,6 +317,8 @@ class PublishClient(salt.transport.base.PublishClient):
|
||||||
self.backoff,
|
self.backoff,
|
||||||
self._trace,
|
self._trace,
|
||||||
)
|
)
|
||||||
|
if not timeout:
|
||||||
|
raise
|
||||||
if timeout and time.monotonic() - start > timeout:
|
if timeout and time.monotonic() - start > timeout:
|
||||||
break
|
break
|
||||||
await asyncio.sleep(self.backoff)
|
await asyncio.sleep(self.backoff)
|
||||||
|
|
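The added guard makes a connect failure re-raise immediately when the caller requested no timeout, instead of backing off forever. A synchronous sketch of the same control flow; the function and names are invented, and the real code is async.

```python
import time


def connect_with_retries(connect, timeout=None, backoff=0.1):
    start = time.monotonic()
    while True:
        try:
            return connect()
        except OSError:
            if not timeout:
                raise  # no timeout requested: fail fast, as in the hunk above
            if time.monotonic() - start > timeout:
                break  # timeout exhausted: give up quietly
            time.sleep(backoff)
```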
|
@ -73,7 +73,8 @@ class SyncWrapper:
|
||||||
self.cls = cls
|
self.cls = cls
|
||||||
if loop_kwarg:
|
if loop_kwarg:
|
||||||
kwargs[self.loop_kwarg] = self.io_loop
|
kwargs[self.loop_kwarg] = self.io_loop
|
||||||
self.obj = cls(*args, **kwargs)
|
with current_ioloop(self.io_loop):
|
||||||
|
self.obj = cls(*args, **kwargs)
|
||||||
self._async_methods = list(
|
self._async_methods = list(
|
||||||
set(async_methods + getattr(self.obj, "async_methods", []))
|
set(async_methods + getattr(self.obj, "async_methods", []))
|
||||||
)
|
)
|
||||||
|
|
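`current_ioloop` is a context manager in `salt.utils.asynchronous`; the change constructs the wrapped object while the wrapper's private loop is current, so the object binds to that loop rather than the caller's. A minimal sketch of what such a context manager does, under the assumption that it works by swapping tornado's current loop — not a verbatim copy of Salt's implementation.

```python
import contextlib

import tornado.ioloop


@contextlib.contextmanager
def current_ioloop(io_loop):
    # make io_loop current for the duration of the block, then restore
    orig_loop = tornado.ioloop.IOLoop.current()
    io_loop.make_current()
    try:
        yield
    finally:
        orig_loop.make_current()
```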
|
@ -2950,7 +2950,10 @@ def update_bootstrap(config, url=None):
|
||||||
- The absolute path to the bootstrap
|
- The absolute path to the bootstrap
|
||||||
- The content of the bootstrap script
|
- The content of the bootstrap script
|
||||||
"""
|
"""
|
||||||
default_url = config.get("bootstrap_script_url", "https://bootstrap.saltstack.com")
|
default_url = config.get(
|
||||||
|
"bootstrap_script_url",
|
||||||
|
"https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh",
|
||||||
|
)
|
||||||
if not url:
|
if not url:
|
||||||
url = default_url
|
url = default_url
|
||||||
if not url:
|
if not url:
|
||||||
|
|
|
@@ -76,6 +76,7 @@ import salt.utils.platform
 import salt.utils.process
 import salt.utils.stringutils
 import salt.utils.zeromq
+from salt.exceptions import SaltInvocationError
 from salt.utils.versions import warn_until
 
 log = logging.getLogger(__name__)
@@ -550,6 +551,9 @@ class SaltEvent:
         try:
             if not self.cpub and not self.connect_pub(timeout=wait):
                 break
+            if not self._run_io_loop_sync:
+                log.error("Trying to get event with async subscriber")
+                raise SaltInvocationError("get_event needs synchronous subscriber")
             raw = self.subscriber.recv(timeout=wait)
             if raw is None:
                 break
@@ -487,11 +487,15 @@ class GitProvider:
         ).replace(
             "/", "_"
         )  # replace "/" with "_" to not cause trouble with file system
+
         self._cache_hash = salt.utils.path.join(cache_root, self._cache_basehash)
         self._cache_basename = "_"
         if self.id.startswith("__env__"):
             try:
-                self._cache_basename = self.get_checkout_target()
+                self._cache_basename = self.get_checkout_target().replace(
+                    "/", "-"
+                )  # replace '/' with '-' to not cause trouble with file-system
+
             except AttributeError:
                 log.critical(
                     "__env__ cant generate basename: %s %s", self.role, self.id
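With the replacement above, an `__env__` remote checking out a branch with a slash in its name gets a filesystem-safe cache basename:

```python
branch = "doggy/moggy"  # the branch name exercised by the new pillar test below
cache_basename = branch.replace("/", "-")
assert cache_basename == "doggy-moggy"
```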
@@ -529,7 +533,6 @@ class GitProvider:
         if HAS_PSUTIL:
             cur_pid = os.getpid()
             process = psutil.Process(cur_pid)
-            dgm_process_dir = dir(process)
             cache_dir = self.opts.get("cachedir", None)
             gitfs_active = self.opts.get("gitfs_remotes", None)
             if cache_dir and gitfs_active:
@@ -1567,12 +1570,14 @@ class GitPython(GitProvider):
         local copy was already up-to-date, return False.
         """
         origin = self.repo.remotes[0]
+
         try:
             fetch_results = origin.fetch()
         except AssertionError:
             fetch_results = origin.fetch()
+
         new_objs = False
+
         for fetchinfo in fetch_results:
             if fetchinfo.old_commit is not None:
                 log.debug(
@@ -1781,7 +1786,7 @@ class Pygit2(GitProvider):
             return None
 
         try:
-            head_sha = self.peel(local_head).hex
+            head_sha = str(self.peel(local_head).id)
         except AttributeError:
             # Shouldn't happen, but just in case a future pygit2 API change
             # breaks things, avoid a traceback and log an error.
@@ -1840,7 +1845,10 @@ class Pygit2(GitProvider):
                     self.repo.create_reference(local_ref, pygit2_id)
 
                 try:
-                    target_sha = self.peel(self.repo.lookup_reference(remote_ref)).hex
+                    target_sha = str(
+                        self.peel(self.repo.lookup_reference(remote_ref)).id
+                    )
+
                 except KeyError:
                     log.error(
                         "pygit2 was unable to get SHA for %s in %s remote '%s'",
@@ -1853,6 +1861,7 @@ class Pygit2(GitProvider):
 
                 # Only perform a checkout if HEAD and target are not pointing
                 # at the same SHA1.
+
                 if head_sha != target_sha:
                     # Check existence of the ref in refs/heads/ which
                     # corresponds to the local HEAD. Checking out local_ref
@@ -1921,10 +1930,11 @@ class Pygit2(GitProvider):
                 else:
                     try:
                         # If no AttributeError raised, this is an annotated tag
-                        tag_sha = tag_obj.target.hex
+                        tag_sha = str(tag_obj.target.id)
+
                     except AttributeError:
                         try:
-                            tag_sha = tag_obj.hex
+                            tag_sha = str(tag_obj.id)
                         except AttributeError:
                             # Shouldn't happen, but could if a future pygit2
                             # API change breaks things.
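These `.hex` to `str(... .id)` changes track pygit2's removal of the deprecated `hex` attribute on objects and Oids in newer releases; `str()` of the Oid yields the same 40-character SHA string. A hedged sketch — the repository path is hypothetical.

```python
import pygit2

repo = pygit2.Repository("/path/to/repo")  # hypothetical path
commit = repo[repo.head.target]
sha = str(commit.id)  # replaces the removed commit.hex
assert len(sha) == 40
```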
@@ -2106,6 +2116,7 @@ class Pygit2(GitProvider):
         origin = self.repo.remotes[0]
         refs_pre = self.repo.listall_references()
         fetch_kwargs = {}
+
         # pygit2 radically changed fetchiing in 0.23.2
         if self.remotecallbacks is not None:
             fetch_kwargs["callbacks"] = self.remotecallbacks
@@ -2119,6 +2130,7 @@ class Pygit2(GitProvider):
                 pass
         try:
             fetch_results = origin.fetch(**fetch_kwargs)
+
         except GitError as exc:  # pylint: disable=broad-except
             exc_str = get_error_message(exc).lower()
             if "unsupported url protocol" in exc_str and isinstance(
@@ -2157,6 +2169,7 @@ class Pygit2(GitProvider):
             # pygit2.Remote.fetch() returns a class instance in
             # pygit2 >= 0.21.0
             received_objects = fetch_results.received_objects
+
         if received_objects != 0:
             log.debug(
                 "%s received %s objects for remote '%s'",
@@ -2168,6 +2181,7 @@ class Pygit2(GitProvider):
             log.debug("%s remote '%s' is up-to-date", self.role, self.id)
         refs_post = self.repo.listall_references()
         cleaned = self.clean_stale_refs(local_refs=refs_post)
+
         return True if (received_objects or refs_pre != refs_post or cleaned) else None
 
     def file_list(self, tgt_env):
@@ -2278,7 +2292,7 @@ class Pygit2(GitProvider):
                     blob = None
                     break
         if isinstance(blob, pygit2.Blob):
-            return blob, blob.hex, mode
+            return blob, str(blob.id), mode
         return None, None, None
 
     def get_tree_from_branch(self, ref):
@@ -3481,6 +3495,7 @@ class GitPillar(GitBase):
         """
         self.pillar_dirs = OrderedDict()
         self.pillar_linked_dirs = []
+
         for repo in self.remotes:
             cachedir = self.do_checkout(repo, fetch_on_fail=fetch_on_fail)
             if cachedir is not None:
@@ -110,20 +110,38 @@ def _get_inbound_text(rule, action):
     The "Inbound connections" setting is a combination of 2 parameters:
 
     - AllowInboundRules
+        0 = False
+        1 = True
+        2 = NotConfigured
+        I don't see a way to set "AllowInboundRules" outside of PowerShell
+
     - DefaultInboundAction
+        0 = Not Configured
+        2 = Allow Inbound
+        4 = Block Inbound
 
     The settings are as follows:
 
     Rules Action
+    0     4      BlockInboundAlways
+    1     0      NotConfigured
+    1     2      AllowInbound
+    1     4      BlockInbound
+    2     0      NotConfigured
     2     2      AllowInbound
     2     4      BlockInbound
-    0     4      BlockInboundAlways
-    2     0      NotConfigured
     """
     settings = {
         0: {
+            0: "NotConfigured",
+            2: "AllowInbound",
             4: "BlockInboundAlways",
         },
+        1: {
+            0: "NotConfigured",
+            2: "AllowInbound",
+            4: "BlockInbound",
+        },
         2: {
             0: "NotConfigured",
             2: "AllowInbound",
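The expanded table now covers every observed `(AllowInboundRules, DefaultInboundAction)` pair. A quick check of the nested lookup the function performs:

```python
settings = {
    0: {0: "NotConfigured", 2: "AllowInbound", 4: "BlockInboundAlways"},
    1: {0: "NotConfigured", 2: "AllowInbound", 4: "BlockInbound"},
    2: {0: "NotConfigured", 2: "AllowInbound", 4: "BlockInbound"},
}
# rules allowed (1) with inbound action blocked (4) reads as "BlockInbound"
assert settings[1][4] == "BlockInbound"
```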
@@ -143,6 +161,30 @@ def _get_inbound_settings(text):
     return settings[text.lower()]
 
 
+def _get_all_settings(profile, store="local"):
+    # Get current settings using PowerShell
+    # if "lgpo.firewall_profile_settings" not in __context__:
+    cmd = ["Get-NetFirewallProfile"]
+    if profile:
+        cmd.append(profile)
+    if store.lower() == "lgpo":
+        cmd.extend(["-PolicyStore", "localhost"])
+
+    # Run the command and get dict
+    settings = salt.utils.win_pwsh.run_dict(cmd)
+
+    # A successful run should return a dictionary
+    if not settings:
+        raise CommandExecutionError("LGPO NETSH: An unknown error occurred")
+
+    # Remove the junk
+    for setting in list(settings.keys()):
+        if setting.startswith("Cim"):
+            settings.pop(setting)
+
+    return settings
+
+
 def get_settings(profile, section, store="local"):
     """
     Get the firewall property from the specified profile in the specified store
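The new `_get_all_settings` helper centralizes the PowerShell call that `get_settings` and `get_all_settings` previously each built themselves. An illustration of the command list it assembles for the LGPO store; the resulting command line is shown as a comment.

```python
cmd = ["Get-NetFirewallProfile"]
profile, store = "Domain", "lgpo"
if profile:
    cmd.append(profile)
if store.lower() == "lgpo":
    cmd.extend(["-PolicyStore", "localhost"])
print(" ".join(cmd))  # Get-NetFirewallProfile Domain -PolicyStore localhost
```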
@@ -190,24 +232,7 @@ def get_settings(profile, section, store="local"):
     if store.lower() not in ("local", "lgpo"):
         raise ValueError(f"Incorrect store: {store}")
 
-    # Build the powershell command
-    cmd = ["Get-NetFirewallProfile"]
-    if profile:
-        cmd.append(profile)
-    if store and store.lower() == "lgpo":
-        cmd.extend(["-PolicyStore", "localhost"])
-
-    # Run the command
-    settings = salt.utils.win_pwsh.run_dict(cmd)
-
-    # A successful run should return a dictionary
-    if not settings:
-        raise CommandExecutionError("LGPO NETSH: An unknown error occurred")
-
-    # Remove the junk
-    for setting in list(settings.keys()):
-        if setting.startswith("Cim"):
-            settings.pop(setting)
-
+    settings = _get_all_settings(profile=profile, store=store)
     # Make it look like netsh output
     ret_settings = {
@@ -299,24 +324,7 @@ def get_all_settings(profile, store="local"):
     if store.lower() not in ("local", "lgpo"):
         raise ValueError(f"Incorrect store: {store}")
 
-    # Build the powershell command
-    cmd = ["Get-NetFirewallProfile"]
-    if profile:
-        cmd.append(profile)
-    if store and store.lower() == "lgpo":
-        cmd.extend(["-PolicyStore", "localhost"])
-
-    # Run the command
-    settings = salt.utils.win_pwsh.run_dict(cmd)
-
-    # A successful run should return a dictionary
-    if not settings:
-        raise CommandExecutionError("LGPO NETSH: An unknown error occurred")
-
-    # Remove the junk
-    for setting in list(settings.keys()):
-        if setting.startswith("Cim"):
-            settings.pop(setting)
-
+    settings = _get_all_settings(profile=profile, store=store)
     # Make it look like netsh output
     ret_settings = {
@@ -409,6 +417,9 @@ def set_firewall_settings(profile, inbound=None, outbound=None, store="local"):
         raise ValueError(f"Incorrect outbound value: {outbound}")
     if not inbound and not outbound:
         raise ValueError("Must set inbound or outbound")
+
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-allowinboundrules
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-defaultoutboundaction
     if store == "local":
         if inbound and inbound.lower() == "notconfigured":
             msg = "Cannot set local inbound policies as NotConfigured"
@@ -417,16 +428,26 @@ def set_firewall_settings(profile, inbound=None, outbound=None, store="local"):
             msg = "Cannot set local outbound policies as NotConfigured"
             raise CommandExecutionError(msg)
 
+    # Get current settings
+    settings = _get_all_settings(profile=profile, store=store)
+
     # Build the powershell command
     cmd = ["Set-NetFirewallProfile"]
     if profile:
         cmd.append(profile)
-    if store and store.lower() == "lgpo":
+    if store.lower() == "lgpo":
         cmd.extend(["-PolicyStore", "localhost"])
 
     # Get inbound settings
     if inbound:
         in_rule, in_action = _get_inbound_settings(inbound.lower())
+        # If current AllowInboundRules is set (1 or 2) and new AllowInboundRules is 2
+        # We want to just keep the current setting.
+        # We don't have a way in LGPO to set the AllowInboundRules. I can't find it in
+        # gpedit.msc either. Not sure how to set it outside of PowerShell
+        current_in_rule = settings["AllowInboundRules"]
+        if current_in_rule > 0 and in_rule == 2:
+            in_rule = current_in_rule
         cmd.extend(["-AllowInboundRules", in_rule, "-DefaultInboundAction", in_action])
 
     if outbound:
@@ -509,10 +530,6 @@ def set_logging_settings(profile, setting, value, store="local"):
     # Input validation
     if profile.lower() not in ("domain", "public", "private"):
         raise ValueError(f"Incorrect profile: {profile}")
-    if store == "local":
-        if str(value).lower() == "notconfigured":
-            msg = "Cannot set local policies as NotConfigured"
-            raise CommandExecutionError(msg)
     if setting.lower() not in (
         "allowedconnections",
         "droppedconnections",
@@ -520,6 +537,18 @@ def set_logging_settings(profile, setting, value, store="local"):
         "maxfilesize",
     ):
         raise ValueError(f"Incorrect setting: {setting}")
+
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-logallowed
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-logblocked
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-logmaxsizekilobytes
+    if str(value).lower() == "notconfigured" and store.lower() == "local":
+        if setting in ["allowedconnections", "droppedconnections", "maxfilesize"]:
+            raise CommandExecutionError(
+                "NotConfigured only valid when setting Group Policy"
+            )
+    if setting == "maxfilesize" and str(value).lower() == "notconfigured":
+        raise CommandExecutionError(f"NotConfigured not a valid option for {setting}")
+
     settings = {"filename": ["-LogFileName", value]}
     if setting.lower() in ("allowedconnections", "droppedconnections"):
         if value.lower() not in ("enable", "disable", "notconfigured"):
@@ -588,7 +617,7 @@ def set_settings(profile, setting, value, store="local"):
 
         - enable
         - disable
-        - notconfigured
+        - notconfigured <== lgpo only
 
     store (str):
         The store to use. This is either the local firewall policy or the
@@ -618,20 +647,19 @@ def set_settings(profile, setting, value, store="local"):
         raise ValueError(f"Incorrect setting: {setting}")
     if value.lower() not in ("enable", "disable", "notconfigured"):
         raise ValueError(f"Incorrect value: {value}")
-    if setting.lower() in ["localfirewallrules", "localconsecrules"]:
-        if store.lower() != "lgpo":
-            msg = f"{setting} can only be set using Group Policy"
-            raise CommandExecutionError(msg)
-    if setting.lower() == "inboundusernotification" and store.lower() != "lgpo":
-        if value.lower() == "notconfigured":
-            msg = "NotConfigured is only valid when setting group policy"
-            raise CommandExecutionError(msg)
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-allowlocalfirewallrules
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-allowlocalipsecrules
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-allowunicastresponsetomulticast
+    # https://learn.microsoft.com/en-us/powershell/module/netsecurity/set-netfirewallprofile?view=windowsserver2025-ps#-notifyonlisten
+    if value.lower() == "notconfigured" and store.lower() == "local":
+        msg = "NotConfigured is only valid when setting group policy"
+        raise CommandExecutionError(msg)
 
     # Build the powershell command
     cmd = ["Set-NetFirewallProfile"]
     if profile:
         cmd.append(profile)
-    if store and store.lower() == "lgpo":
+    if store.lower() == "lgpo":
         cmd.extend(["-PolicyStore", "localhost"])
 
     settings = {
@@ -706,7 +734,7 @@ def set_state(profile, state, store="local"):
     cmd = ["Set-NetFirewallProfile"]
     if profile:
         cmd.append(profile)
-    if store and store.lower() == "lgpo":
+    if store.lower() == "lgpo":
         cmd.extend(["-PolicyStore", "localhost"])
 
     cmd.extend(["-Enabled", ON_OFF[state.lower()]])
@@ -2,6 +2,7 @@ import pytest
 
 from salt.pillar.git_pillar import ext_pillar
 from salt.utils.immutabletypes import ImmutableDict, ImmutableList
+from salt.utils.odict import OrderedDict
 from tests.support.mock import patch
 
 pytestmark = [
@@ -260,3 +261,38 @@ def test_gitpython_multiple_2(gitpython_pillar_opts, grains):
 @skipif_no_pygit2
 def test_pygit2_multiple_2(pygit2_pillar_opts, grains):
     _test_multiple_2(pygit2_pillar_opts, grains)
+
+
+def _test_multiple_slash_in_branch_name(pillar_opts, grains):
+    pillar_opts["pillarenv"] = "doggy/moggy"
+    data = _get_ext_pillar(
+        "minion",
+        pillar_opts,
+        grains,
+        "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
+    )
+    assert data == {
+        "key": "data",
+        "foo": OrderedDict(
+            [
+                ("animals", OrderedDict([("breed", "seadog")])),
+                (
+                    "feature/baz",
+                    OrderedDict(
+                        [("test1", "dog"), ("test2", "kat"), ("test3", "gerbil")]
+                    ),
+                ),
+            ]
+        ),
+    }
+
+
+@skipif_no_gitpython
+def test_gitpython_multiple_slash_in_branch_name(gitpython_pillar_opts, grains):
+    _test_multiple_slash_in_branch_name(gitpython_pillar_opts, grains)
+
+
+@skipif_no_pygit2
+def test_pygit2_multiple_slash_in_branch_name(pygit2_pillar_opts, grains):
+    _test_multiple_slash_in_branch_name(pygit2_pillar_opts, grains)
@@ -32,6 +32,8 @@ except ImportError:
 skipif_no_gitpython = pytest.mark.skipif(not HAS_GITPYTHON, reason="Missing gitpython")
 skipif_no_pygit2 = pytest.mark.skipif(not HAS_PYGIT2, reason="Missing pygit2")
 
+testgitfs = "https://github.com/saltstack/salt-test-pillar-gitfs.git"
+
 
 @pytest.fixture
 def pillar_opts(salt_factories, tmp_path):
@@ -72,9 +74,7 @@ def _get_pillar(opts, *remotes):
 
 @skipif_no_gitpython
 def test_gitpython_pillar_provider(gitpython_pillar_opts):
-    p = _get_pillar(
-        gitpython_pillar_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
-    )
+    p = _get_pillar(gitpython_pillar_opts, testgitfs)
     assert len(p.remotes) == 1
     assert p.provider == "gitpython"
     assert isinstance(p.remotes[0], GitPython)
@@ -82,18 +82,14 @@ def test_gitpython_pillar_provider(gitpython_pillar_opts):
 
 @skipif_no_pygit2
 def test_pygit2_pillar_provider(pygit2_pillar_opts):
-    p = _get_pillar(
-        pygit2_pillar_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
-    )
+    p = _get_pillar(pygit2_pillar_opts, testgitfs)
     assert len(p.remotes) == 1
     assert p.provider == "pygit2"
     assert isinstance(p.remotes[0], Pygit2)
 
 
 def _test_env(opts):
-    p = _get_pillar(
-        opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
-    )
+    p = _get_pillar(opts, f"__env__ {testgitfs}")
     assert len(p.remotes) == 1
     p.checkout()
     repo = p.remotes[0]
@@ -102,9 +98,7 @@ def _test_env(opts):
     for f in (".gitignore", "README.md", "file.sls", "top.sls"):
         assert f in files
     opts["pillarenv"] = "main"
-    p2 = _get_pillar(
-        opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
-    )
+    p2 = _get_pillar(opts, f"__env__ {testgitfs}")
     assert len(p.remotes) == 1
     p2.checkout()
     repo2 = p2.remotes[0]
@@ -165,9 +159,9 @@ def test_pygit2_checkout_fetch_on_fail(pygit2_pillar_opts):
 def _test_multiple_repos(opts):
     p = _get_pillar(
         opts,
-        "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
-        "main https://github.com/saltstack/salt-test-pillar-gitfs.git",
-        "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        f"__env__ {testgitfs}",
+        f"main {testgitfs}",
+        f"branch {testgitfs}",
         "__env__ https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
         "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
     )
@@ -179,9 +173,9 @@ def _test_multiple_repos(opts):
 
     p2 = _get_pillar(
         opts,
-        "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
-        "main https://github.com/saltstack/salt-test-pillar-gitfs.git",
-        "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        f"__env__ {testgitfs}",
+        f"main {testgitfs}",
+        f"branch {testgitfs}",
         "__env__ https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
         "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
     )
@@ -194,9 +188,9 @@ def _test_multiple_repos(opts):
     opts["pillarenv"] = "main"
     p3 = _get_pillar(
         opts,
-        "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
-        "main https://github.com/saltstack/salt-test-pillar-gitfs.git",
-        "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        f"__env__ {testgitfs}",
+        f"main {testgitfs}",
+        f"branch {testgitfs}",
         "__env__ https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
         "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
     )
@@ -227,15 +221,13 @@ def test_pygit2_multiple_repos(pygit2_pillar_opts):
 def _test_fetch_request(opts):
     p = _get_pillar(
         opts,
-        "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        f"__env__ {testgitfs}",
         "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
     )
     frequest = os.path.join(p.remotes[0].get_salt_working_dir(), "fetch_request")
     frequest_other = os.path.join(p.remotes[1].get_salt_working_dir(), "fetch_request")
     opts["pillarenv"] = "main"
-    p2 = _get_pillar(
-        opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
-    )
+    p2 = _get_pillar(opts, f"__env__ {testgitfs}")
     frequest2 = os.path.join(p2.remotes[0].get_salt_working_dir(), "fetch_request")
     assert frequest != frequest2
     assert os.path.isfile(frequest) is False
@@ -277,15 +269,13 @@ def test_pygit2_fetch_request(pygit2_pillar_opts):
 def _test_clear_old_remotes(opts):
     p = _get_pillar(
         opts,
-        "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        f"__env__ {testgitfs}",
         "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
     )
     repo = p.remotes[0]
     repo2 = p.remotes[1]
     opts["pillarenv"] = "main"
-    p2 = _get_pillar(
-        opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
-    )
+    p2 = _get_pillar(opts, f"__env__ {testgitfs}")
     repo3 = p2.remotes[0]
     assert os.path.isdir(repo.get_cachedir()) is True
     assert os.path.isdir(repo2.get_cachedir()) is True
@@ -313,7 +303,7 @@ def test_pygit2_clear_old_remotes(pygit2_pillar_opts):
 def _test_remote_map(opts):
     p = _get_pillar(
         opts,
-        "https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        testgitfs,
     )
     p.fetch_remotes()
     assert len(p.remotes) == 1
@@ -335,7 +325,7 @@ def test_pygit2_remote_map(pygit2_pillar_opts):
 def _test_lock(opts):
     p = _get_pillar(
         opts,
-        "https://github.com/saltstack/salt-test-pillar-gitfs.git",
+        testgitfs,
    )
     p.fetch_remotes()
     assert len(p.remotes) == 1
@@ -345,8 +335,7 @@ def _test_lock(opts):
     assert repo.lock() == (
         [
             (
-                f"Set update lock for git_pillar remote "
-                f"'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+                f"Set update lock for git_pillar remote '{testgitfs}' on machine_id '{mach_id}'"
             )
         ],
         [],
@@ -355,8 +344,7 @@ def _test_lock(opts):
     assert repo.clear_lock() == (
         [
             (
-                f"Removed update lock for git_pillar remote "
-                f"'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+                f"Removed update lock for git_pillar remote '{testgitfs}' on machine_id '{mach_id}'"
             )
         ],
         [],
106
tests/pytests/integration/minion/test_schedule_large_event.py
Normal file
@@ -0,0 +1,106 @@
+import sys
+
+import pytest
+
+import salt.utils.event
+import salt.utils.platform
+import tests.support.helpers
+from tests.conftest import FIPS_TESTRUN
+
+
+@pytest.fixture
+def salt_master_1(request, salt_factories):
+    config_defaults = {
+        "open_mode": True,
+        "transport": request.config.getoption("--transport"),
+    }
+    config_overrides = {
+        "interface": "127.0.0.1",
+        "fips_mode": FIPS_TESTRUN,
+        "publish_signing_algorithm": (
+            "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1"
+        ),
+    }
+
+    factory = salt_factories.salt_master_daemon(
+        "master-1",
+        defaults=config_defaults,
+        overrides=config_overrides,
+        extra_cli_arguments_after_first_start_failure=["--log-level=info"],
+    )
+    with factory.started(start_timeout=120):
+        yield factory
+
+
+@pytest.fixture
+def salt_minion_1(salt_master_1):
+    config_defaults = {
+        "transport": salt_master_1.config["transport"],
+    }
+    master_1_port = salt_master_1.config["ret_port"]
+    master_1_addr = salt_master_1.config["interface"]
+    config_overrides = {
+        "master": [
+            f"{master_1_addr}:{master_1_port}",
+        ],
+        "test.foo": "baz",
+        "fips_mode": FIPS_TESTRUN,
+        "encryption_algorithm": "OAEP-SHA224" if FIPS_TESTRUN else "OAEP-SHA1",
+        "signing_algorithm": "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1",
+    }
+    factory = salt_master_1.salt_minion_daemon(
+        "minion-1",
+        defaults=config_defaults,
+        overrides=config_overrides,
+        extra_cli_arguments_after_first_start_failure=["--log-level=info"],
+    )
+    with factory.started(start_timeout=120):
+        yield factory
+
+
+@pytest.fixture
+def script(salt_minion_1, tmp_path):
+    path = tmp_path / "script.py"
+    content = f"""
+    import salt.config
+    import salt.utils.event
+
+    opts = salt.config.minion_config('{salt_minion_1.config_file}')
+
+    payload = b'0' * 1048576000
+
+    big_event = dict()
+    for i in range(10000):
+        big_event[i] = payload = b'0' * 100
+
+    with salt.utils.event.get_event("minion", opts=opts) as event:
+        event.fire_master(big_event, 'bigevent')
+
+    """
+    path.write_text(tests.support.helpers.dedent(content))
+    return path
+
+
+# @pytest.mark.timeout_unless_on_windows(360)
+def test_schedule_large_event(salt_master_1, salt_minion_1, script):
+    cli = salt_master_1.salt_cli(timeout=120)
+    ret = cli.run(
+        "schedule.add",
+        name="myjob",
+        function="cmd.run",
+        seconds=5,
+        job_args=f'["{sys.executable} {script}"]',
+        minion_tgt=salt_minion_1.id,
+    )
+    assert "result" in ret.data
+    assert ret.data["result"]
+    with salt.utils.event.get_event(
+        "master",
+        salt_master_1.config["sock_dir"],
+        salt_master_1.config["transport"],
+        salt_master_1.config,
+    ) as event:
+        event = event.get_event(tag="bigevent", wait=15)
+        assert event
+        assert "data" in event
+        assert len(event["data"]) == 10000
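The new integration test above schedules a job that fires one oversized custom event from the minion and then watches the master event bus for it. For orientation, a minimal sketch of the master-side listening pattern the test uses follows; the `bigevent` tag and the 15-second wait come from the test itself, while the config path is only a placeholder:

```python
import salt.config
import salt.utils.event

# Placeholder path; the test gets its opts from the salt_master_1 factory.
opts = salt.config.master_config("/etc/salt/master")

with salt.utils.event.get_event(
    "master",
    opts["sock_dir"],
    opts["transport"],
    opts,
) as bus:
    # Block for up to 15 seconds waiting for the custom tag.
    evt = bus.get_event(tag="bigevent", wait=15)
    if evt is not None:
        print(f"received {len(evt['data'])} keys")
```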
@@ -25,6 +25,12 @@ def salt_mm_master_1(request, salt_factories):
         "publish_signing_algorithm": (
             "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1"
         ),
+        "log_granular_levels": {
+            "salt": "info",
+            "salt.transport": "debug",
+            "salt.channel": "debug",
+            "salt.utils.event": "debug",
+        },
     }
     factory = salt_factories.salt_master_daemon(
         "mm-master-1",
@@ -56,6 +62,12 @@ def salt_mm_master_2(salt_factories, salt_mm_master_1):
         "publish_signing_algorithm": (
             "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1"
         ),
+        "log_granular_levels": {
+            "salt": "info",
+            "salt.transport": "debug",
+            "salt.channel": "debug",
+            "salt.utils.event": "debug",
+        },
     }

     # Use the same ports for both masters, they are binding to different interfaces
@@ -106,6 +118,13 @@ def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2):
         "fips_mode": FIPS_TESTRUN,
         "encryption_algorithm": "OAEP-SHA224" if FIPS_TESTRUN else "OAEP-SHA1",
         "signing_algorithm": "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1",
+        "log_granular_levels": {
+            "salt": "info",
+            "salt.minion": "debug",
+            "salt.transport": "debug",
+            "salt.channel": "debug",
+            "salt.utils.event": "debug",
+        },
     }
     factory = salt_mm_master_1.salt_minion_daemon(
         "mm-minion-1",
@@ -136,6 +155,13 @@ def salt_mm_minion_2(salt_mm_master_1, salt_mm_master_2):
         "fips_mode": FIPS_TESTRUN,
         "encryption_algorithm": "OAEP-SHA224" if FIPS_TESTRUN else "OAEP-SHA1",
         "signing_algorithm": "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1",
+        "log_granular_levels": {
+            "salt": "info",
+            "salt.minion": "debug",
+            "salt.transport": "debug",
+            "salt.channel": "debug",
+            "salt.utils.event": "debug",
+        },
     }
     factory = salt_mm_master_2.salt_minion_daemon(
         "mm-minion-2",
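The repeated `log_granular_levels` blocks set per-logger verbosity for the test daemons: everything under `salt` logs at info while the transport, channel, and event machinery log at debug. Conceptually this is the same per-logger control the Python stdlib offers; a minimal illustrative sketch (the logger names are taken from the overrides above, the rest is an assumption for demonstration):

```python
import logging

logging.basicConfig(level=logging.INFO)
# Raise verbosity only for the noisy subsystems under investigation.
for name in ("salt.transport", "salt.channel", "salt.utils.event"):
    logging.getLogger(name).setLevel(logging.DEBUG)
```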
@@ -3379,6 +3379,12 @@ def test_linux_gpus(caplog):
             "Vega [Radeon RX Vega]]",
             "amd",
         ],  # AMD
+        [
+            "Processing accelerators",
+            "Advanced Micro Devices, Inc. [AMD/ATI]",
+            "Device X",
+            "amd",
+        ],  # AMD
         [
             "Audio device",
             "Advanced Micro Devices, Inc. [AMD/ATI]",
@@ -349,6 +349,9 @@ def _test_set_user_policy(lgpo_bin, shell, name, setting, exp_regexes):
         ],
     ),
     (
+        # This will need to be fixed for Windows Server 2025
+        # The bottom two options have been removed in 2025
+        # Though not set here, we're verifying there were set
         "Specify settings for optional component installation and component repair",
         "Disabled",
         [
@@ -358,6 +361,8 @@ def _test_set_user_policy(lgpo_bin, shell, name, setting, exp_regexes):
         ],
     ),
     (
+        # This will need to be fixed for Windows Server 2025
+        # The bottom two options have been removed in 2025
         "Specify settings for optional component installation and component repair",
         {
             "Alternate source file path": "",
@@ -371,6 +376,8 @@ def _test_set_user_policy(lgpo_bin, shell, name, setting, exp_regexes):
         ],
     ),
     (
+        # This will need to be fixed for Windows Server 2025
+        # The bottom two options have been removed in 2025
         "Specify settings for optional component installation and component repair",
         {
             "Alternate source file path": r"\\some\fake\server",
@@ -757,3 +764,16 @@ def test_set_computer_policy_multiple_policies(clean_comp, lgpo_bin, shell):
             r"\\AU[\s]*AllowMUUpdateService[\s]*DELETE",
         ],
     )
+
+
+def test__encode_xmlns_url():
+    """
+    Tests the _encode_xmlns_url function.
+    Spaces in the xmlns url should be converted to %20
+    """
+    line = '<policyDefinitionResources xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" revision="1.0" schemaVersion="1.0" xmlns="http://schemas.microsoft.com/GroupPolicy/2006/07/Policysecurity intelligence">'
+    result = re.sub(
+        r'(.*)(\bxmlns(?::\w+)?)\s*=\s*"([^"]+)"(.*)', win_lgpo._encode_xmlns_url, line
+    )
+    expected = '<policyDefinitionResources xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" revision="1.0" schemaVersion="1.0" xmlns="http://schemas.microsoft.com/GroupPolicy/2006/07/Policysecurity%20intelligence">'
+    assert result == expected
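The new test passes `win_lgpo._encode_xmlns_url` as the replacement argument to `re.sub`, so it is invoked once per match with the match object rather than as a plain string. A minimal sketch of that callback pattern using the same regex; the helper below is a hypothetical stand-in, not Salt's implementation:

```python
import re

def _encode_spaces(match):
    # Hypothetical stand-in: re-assemble the matched attribute,
    # percent-encoding spaces inside the quoted URL (group 3).
    before, attr, url, after = match.groups()
    return f'{before}{attr}="{url.replace(" ", "%20")}"{after}'

line = '<a xmlns="http://example.com/with space">'
print(re.sub(r'(.*)(\bxmlns(?::\w+)?)\s*=\s*"([^"]+)"(.*)', _encode_spaces, line))
# <a xmlns="http://example.com/with%20space">
```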
@@ -2,6 +2,7 @@ import asyncio
 import copy
 import logging
 import os
+import uuid

 import pytest
 import tornado
@@ -102,12 +103,15 @@ def test_minion_load_grains_default(minion_opts):
     ],
 )
 def test_send_req_fires_completion_event(event, minion_opts):
+    req_id = uuid.uuid4()
     event_enter = MagicMock()
     event_enter.send.side_effect = event[1]
     event = MagicMock()
     event.__enter__.return_value = event_enter

-    with patch("salt.utils.event.get_event", return_value=event):
+    with patch("salt.utils.event.get_event", return_value=event), patch(
+        "uuid.uuid4", return_value=req_id
+    ):
         minion_opts["random_startup_delay"] = 0
         minion_opts["return_retry_tries"] = 30
         minion_opts["grains"] = {}
@@ -132,7 +136,7 @@ def test_send_req_fires_completion_event(event, minion_opts):
             condition_event_tag = (
                 len(call.args) > 1
                 and call.args[1]
-                == f"__master_req_channel_payload/{minion_opts['master']}"
+                == f"__master_req_channel_payload/{req_id}/{minion_opts['master']}"
             )
             condition_event_tag_error = (
                 "{} != {}; Call(number={}): {}".format(
@@ -167,18 +171,18 @@ async def test_send_req_async_regression_62453(minion_opts):
     event.__enter__.return_value = event_enter

     minion_opts["random_startup_delay"] = 0
-    minion_opts["return_retry_tries"] = 30
+    minion_opts["return_retry_tries"] = 5
     minion_opts["grains"] = {}
     minion_opts["ipc_mode"] = "tcp"
     with patch("salt.loader.grains"):
         minion = salt.minion.Minion(minion_opts)

         load = {"load": "value"}
-        timeout = 60
+        timeout = 1

         # We are just validating no exception is raised
-        rtn = await minion._send_req_async(load, timeout)
-        assert rtn is False
+        with pytest.raises(TimeoutError):
+            rtn = await minion._send_req_async(load, timeout)


 def test_mine_send_tries(minion_opts):
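The fix threads a request id into the completion-event tag, so the test pins `uuid.uuid4` to a known value before asserting on the tag. A minimal standalone sketch of that pinning pattern; the tag contents here are illustrative:

```python
import uuid
from unittest.mock import patch

req_id = uuid.uuid4()
with patch("uuid.uuid4", return_value=req_id):
    # Code running under this context sees the pinned value, so tags
    # built from uuid.uuid4() become deterministic and assertable.
    tag = f"__master_req_channel_payload/{uuid.uuid4()}/master-host"
assert str(req_id) in tag
```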
@@ -23,6 +23,14 @@ except AttributeError:
 if HAS_PYGIT2:
     import pygit2

+    try:
+        from pygit2.enums import ObjectType
+
+        HAS_PYGIT2_ENUMS = True
+
+    except ModuleNotFoundError:
+        HAS_PYGIT2_ENUMS = False
+

 @pytest.fixture
 def minion_opts(tmp_path):
@@ -147,9 +155,14 @@ def _prepare_remote_repository_pygit2(tmp_path):
         tree,
         [repository.head.target],
     )
-    repository.create_tag(
-        "annotated_tag", commit, pygit2.GIT_OBJ_COMMIT, signature, "some message"
-    )
+    if HAS_PYGIT2_ENUMS:
+        repository.create_tag(
+            "annotated_tag", commit, ObjectType.COMMIT, signature, "some message"
+        )
+    else:
+        repository.create_tag(
+            "annotated_tag", commit, pygit2.GIT_OBJ_COMMIT, signature, "some message"
+        )
     return remote
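Newer pygit2 releases expose the object-type constants through `pygit2.enums`, so the fixture probes for the new location once at import time and branches on the result. The same compatibility-shim pattern in isolation (a sketch; the flag and enum names follow the diff above, the helper is illustrative):

```python
try:
    # Newer pygit2 exposes enums in a dedicated module.
    from pygit2.enums import ObjectType

    HAS_PYGIT2_ENUMS = True
except ModuleNotFoundError:
    # Older releases only provide the module-level GIT_OBJ_* constants.
    HAS_PYGIT2_ENUMS = False


def commit_object_type(pygit2_module):
    # Illustrative helper: resolve the commit object type for either API.
    if HAS_PYGIT2_ENUMS:
        return ObjectType.COMMIT
    return pygit2_module.GIT_OBJ_COMMIT
```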
@@ -9,72 +9,42 @@ pytestmark = [
 ]


-def test_get_settings_firewallpolicy_local():
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+def test_get_settings_firewallpolicy(store):
     ret = win_lgpo_netsh.get_settings(
-        profile="domain", section="firewallpolicy", store="local"
+        profile="domain", section="firewallpolicy", store=store
     )
     assert "Inbound" in ret
     assert "Outbound" in ret


-def test_get_settings_firewallpolicy_lgpo():
-    ret = win_lgpo_netsh.get_settings(
-        profile="domain", section="firewallpolicy", store="lgpo"
-    )
-    assert "Inbound" in ret
-    assert "Outbound" in ret
-
-
-def test_get_settings_logging_local():
-    ret = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="local"
-    )
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+def test_get_settings_logging(store):
+    ret = win_lgpo_netsh.get_settings(profile="domain", section="logging", store=store)
     assert "FileName" in ret
     assert "LogAllowedConnections" in ret
     assert "LogDroppedConnections" in ret
     assert "MaxFileSize" in ret


-def test_get_settings_logging_lgpo():
-    ret = win_lgpo_netsh.get_settings(profile="domain", section="logging", store="lgpo")
-    assert "FileName" in ret
-    assert "LogAllowedConnections" in ret
-    assert "LogDroppedConnections" in ret
-    assert "MaxFileSize" in ret
-
-
-def test_get_settings_settings_local():
-    ret = win_lgpo_netsh.get_settings(
-        profile="domain", section="settings", store="local"
-    )
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+def test_get_settings_settings(store):
+    ret = win_lgpo_netsh.get_settings(profile="domain", section="settings", store=store)
     assert "InboundUserNotification" in ret
     assert "LocalConSecRules" in ret
     assert "LocalFirewallRules" in ret
     assert "UnicastResponseToMulticast" in ret


-def test_get_settings_settings_lgpo():
-    ret = win_lgpo_netsh.get_settings(
-        profile="domain", section="settings", store="lgpo"
-    )
-    assert "InboundUserNotification" in ret
-    assert "LocalConSecRules" in ret
-    assert "LocalFirewallRules" in ret
-    assert "UnicastResponseToMulticast" in ret
-
-
-def test_get_settings_state_local():
-    ret = win_lgpo_netsh.get_settings(profile="domain", section="state", store="local")
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+def test_get_settings_state(store):
+    ret = win_lgpo_netsh.get_settings(profile="domain", section="state", store=store)
     assert "State" in ret


-def test_get_settings_state_lgpo():
-    ret = win_lgpo_netsh.get_settings(profile="domain", section="state", store="lgpo")
-    assert "State" in ret
-
-
-def test_get_all_settings_local():
-    ret = win_lgpo_netsh.get_all_settings(profile="domain", store="local")
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+def test_get_all_settings(store):
+    ret = win_lgpo_netsh.get_all_settings(profile="domain", store=store)
     assert "Inbound" in ret
     assert "Outbound" in ret
     assert "FileName" in ret
@@ -88,470 +58,287 @@ def test_get_all_settings_local():
     assert "State" in ret


-def test_get_all_settings_lgpo():
-    ret = win_lgpo_netsh.get_all_settings(profile="domain", store="local")
-    assert "Inbound" in ret
-    assert "Outbound" in ret
-    assert "FileName" in ret
-    assert "LogAllowedConnections" in ret
-    assert "LogDroppedConnections" in ret
-    assert "MaxFileSize" in ret
-    assert "InboundUserNotification" in ret
-    assert "LocalConSecRules" in ret
-    assert "LocalFirewallRules" in ret
-    assert "UnicastResponseToMulticast" in ret
-    assert "State" in ret
-
-
-def test_get_all_profiles_local():
-    ret = win_lgpo_netsh.get_all_profiles(store="local")
-    assert "Domain Profile" in ret
-    assert "Private Profile" in ret
-    assert "Public Profile" in ret
-
-
-def test_get_all_profiles_lgpo():
-    ret = win_lgpo_netsh.get_all_profiles(store="lgpo")
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+def test_get_all_profiles(store):
+    ret = win_lgpo_netsh.get_all_profiles(store=store)
     assert "Domain Profile" in ret
     assert "Private Profile" in ret
     assert "Public Profile" in ret


 @pytest.mark.destructive_test
-def test_set_firewall_settings_inbound_local():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="firewallpolicy", store="local"
-    )["Inbound"]
-    try:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", inbound="allowinbound", store="local"
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="firewallpolicy", store="local"
-        )["Inbound"]
-        assert new == "AllowInbound"
-    finally:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", inbound=current, store="local"
-        )
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_settings_inbound_local_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="firewallpolicy", store="local"
-    )["Inbound"]
-    try:
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize(
+    "inbound", ["allowinbound", "blockinbound", "blockinboundalways", "notconfigured"]
+)
+def test_set_firewall_settings_inbound(store, inbound):
+    if inbound == "notconfigured" and store == "local":
         pytest.raises(
             CommandExecutionError,
             win_lgpo_netsh.set_firewall_settings,
             profile="domain",
-            inbound="notconfigured",
-            store="local",
+            inbound=inbound,
+            store=store,
         )
-    finally:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", inbound=current, store="local"
-        )
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_settings_inbound_lgpo_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="firewallpolicy", store="lgpo"
-    )["Inbound"]
-    try:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", inbound="notconfigured", store="lgpo"
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="firewallpolicy", store="lgpo"
+    else:
+        current = win_lgpo_netsh.get_settings(
+            profile="domain", section="firewallpolicy", store=store
         )["Inbound"]
-        assert new == "NotConfigured"
-    finally:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", inbound=current, store="lgpo"
-        )
-        assert ret is True
+        try:
+            ret = win_lgpo_netsh.set_firewall_settings(
+                profile="domain", inbound=inbound, store=store
+            )
+            assert ret is True
+            new = win_lgpo_netsh.get_settings(
+                profile="domain", section="firewallpolicy", store=store
+            )["Inbound"]
+            assert new.lower() == inbound
+        finally:
+            ret = win_lgpo_netsh.set_firewall_settings(
+                profile="domain", inbound=current, store=store
+            )
+            assert ret is True


 @pytest.mark.destructive_test
-def test_set_firewall_settings_outbound_local():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="firewallpolicy", store="local"
-    )["Outbound"]
-    try:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", outbound="allowoutbound", store="local"
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="firewallpolicy", store="local"
-        )["Outbound"]
-        assert new == "AllowOutbound"
-    finally:
-        ret = win_lgpo_netsh.set_firewall_settings(
-            profile="domain", outbound=current, store="local"
-        )
-        assert ret is True
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize(
+    "outbound", ["allowoutbound", "blockoutbound", "notconfigured"]
+)
+def test_set_firewall_settings_outbound(store, outbound):
+    if outbound == "notconfigured" and store == "local":
+        pytest.raises(
+            CommandExecutionError,
+            win_lgpo_netsh.set_firewall_settings,
+            profile="domain",
+            inbound=outbound,
+            store=store,
+        )
+    else:
+        current = win_lgpo_netsh.get_settings(
+            profile="domain", section="firewallpolicy", store=store
+        )["Outbound"]
+        try:
+            ret = win_lgpo_netsh.set_firewall_settings(
+                profile="domain", outbound=outbound, store=store
+            )
+            assert ret is True
+            new = win_lgpo_netsh.get_settings(
+                profile="domain", section="firewallpolicy", store=store
+            )["Outbound"]
+            assert new.lower() == outbound
+        finally:
+            ret = win_lgpo_netsh.set_firewall_settings(
+                profile="domain", outbound=current, store=store
+            )
+            assert ret is True


 @pytest.mark.destructive_test
-def test_set_firewall_logging_allowed_local_enable():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="local"
-    )["LogAllowedConnections"]
-    try:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="allowedconnections",
-            value="enable",
-            store="local",
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="logging", store="local"
-        )["LogAllowedConnections"]
-        assert new == "Enable"
-    finally:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="allowedconnections",
-            value=current,
-            store="local",
-        )
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_logging_allowed_local_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="local"
-    )["LogAllowedConnections"]
-    try:
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize("setting", ["allowedconnections", "droppedconnections"])
+@pytest.mark.parametrize("value", ["enable", "disable", "notconfigured"])
+def test_set_firewall_logging_connections(store, setting, value):
+    if value == "notconfigured" and store == "local":
         pytest.raises(
             CommandExecutionError,
             win_lgpo_netsh.set_logging_settings,
             profile="domain",
-            setting="allowedconnections",
-            value="notconfigured",
-            store="local",
+            setting=setting,
+            value=value,
+            store=store,
         )
-    finally:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="allowedconnections",
-            value=current,
-            store="local",
-        )
-        assert ret is True
+    else:
+        setting_map = {
+            "allowedconnections": "LogAllowedConnections",
+            "droppedconnections": "LogDroppedConnections",
+        }
+        current = win_lgpo_netsh.get_settings(
+            profile="domain", section="logging", store=store
+        )[setting_map[setting]]
+        try:
+            ret = win_lgpo_netsh.set_logging_settings(
+                profile="domain",
+                setting=setting,
+                value=value,
+                store=store,
+            )
+            assert ret is True
+            new = win_lgpo_netsh.get_settings(
+                profile="domain", section="logging", store=store
+            )[setting_map[setting]]
+            assert new.lower() == value
+        finally:
+            ret = win_lgpo_netsh.set_logging_settings(
+                profile="domain",
+                setting=setting,
+                value=current,
+                store=store,
+            )
+            assert ret is True


 @pytest.mark.destructive_test
-def test_set_firewall_logging_allowed_lgpo_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="lgpo"
-    )["LogAllowedConnections"]
-    try:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="allowedconnections",
-            value="notconfigured",
-            store="lgpo",
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="logging", store="lgpo"
-        )["LogAllowedConnections"]
-        assert new == "NotConfigured"
-    finally:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="allowedconnections",
-            value=current,
-            store="lgpo",
-        )
-        assert ret is True
-
-
-def test_set_firewall_logging_dropped_local_enable():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="local"
-    )["LogDroppedConnections"]
-    try:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="droppedconnections",
-            value="enable",
-            store="local",
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="logging", store="local"
-        )["LogDroppedConnections"]
-        assert new == "Enable"
-    finally:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain",
-            setting="droppedconnections",
-            value=current,
-            store="local",
-        )
-        assert ret is True
-
-
-def test_set_firewall_logging_filename_local():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="local"
-    )["FileName"]
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize("value", ["C:\\Temp\\test.log", "notconfigured"])
+def test_set_firewall_logging_filename(store, value):
+    current = win_lgpo_netsh.get_settings(
+        profile="domain", section="logging", store=store
+    )["FileName"]
     try:
         ret = win_lgpo_netsh.set_logging_settings(
             profile="domain",
             setting="filename",
-            value="C:\\Temp\\test.log",
-            store="local",
+            value=value,
+            store=store,
         )
         assert ret is True
         new = win_lgpo_netsh.get_settings(
-            profile="domain", section="logging", store="local"
+            profile="domain", section="logging", store=store
         )["FileName"]
-        assert new == "C:\\Temp\\test.log"
+        assert new.lower() == value.lower()
     finally:
         ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain", setting="filename", value=current, store="local"
+            profile="domain", setting="filename", value=current, store=store
         )
         assert ret is True


-def test_set_firewall_logging_maxfilesize_local():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="logging", store="local"
-    )["MaxFileSize"]
-    try:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain", setting="maxfilesize", value="16384", store="local"
-        )
+@pytest.mark.destructive_test
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize("value", ["16384", "notconfigured"])
+def test_set_firewall_logging_maxfilesize(store, value):
+    if value == "notconfigured":
+        pytest.raises(
+            CommandExecutionError,
+            win_lgpo_netsh.set_logging_settings,
+            profile="domain",
+            setting="maxfilesize",
+            value=value,
+            store=store,
+        )
+    else:
+        current = win_lgpo_netsh.get_settings(
+            profile="domain", section="logging", store=store
+        )["MaxFileSize"]
+        try:
+            ret = win_lgpo_netsh.set_logging_settings(
+                profile="domain", setting="maxfilesize", value=value, store=store
+            )
+            assert ret is True
+            new = win_lgpo_netsh.get_settings(
+                profile="domain", section="logging", store=store
+            )["MaxFileSize"]
+            assert new == int(value)
+        finally:
+            ret = win_lgpo_netsh.set_logging_settings(
+                profile="domain", setting="maxfilesize", value=current, store=store
+            )
+            assert ret is True
+
+
+@pytest.mark.destructive_test
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize(
+    "setting",
+    ["localconsecrules", "inboundusernotification", "unicastresponsetomulticast"],
+)
+@pytest.mark.parametrize("value", ["enable", "disable", "notconfigured"])
+def test_set_firewall_settings(store, setting, value):
+    setting_map = {
+        "localconsecrules": "LocalConSecRules",
+        "inboundusernotification": "InboundUserNotification",
+        "unicastresponsetomulticast": "UnicastResponseToMulticast",
+    }
+    if value == "notconfigured" and store == "local":
+        pytest.raises(
+            CommandExecutionError,
+            win_lgpo_netsh.set_settings,
+            profile="domain",
+            setting=setting,
+            value=value,
+            store=store,
+        )
+    else:
+        current = win_lgpo_netsh.get_settings(
+            profile="domain", section="settings", store=store
+        )[setting_map[setting]]
+        try:
+            ret = win_lgpo_netsh.set_settings(
+                profile="domain",
+                setting=setting,
+                value=value,
+                store=store,
+            )
+            assert ret is True
+            new = win_lgpo_netsh.get_settings(
+                profile="domain", section="settings", store=store
+            )[setting_map[setting]]
+            assert new.lower() == value
+        finally:
+            if current != "notconfigured":
+                ret = win_lgpo_netsh.set_settings(
+                    profile="domain",
+                    setting=setting,
+                    value=current,
+                    store=store,
+                )
+                assert ret is True
+
+
+@pytest.mark.destructive_test
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize("state", ["on", "off", "notconfigured"])
+def test_set_firewall_state(store, state):
+    current_state = win_lgpo_netsh.get_settings(
+        profile="domain", section="state", store=store
+    )["State"]
+    try:
+        ret = win_lgpo_netsh.set_state(profile="domain", state=state, store=store)
         assert ret is True
         new = win_lgpo_netsh.get_settings(
-            profile="domain", section="logging", store="local"
-        )["MaxFileSize"]
-        assert new == 16384
+            profile="domain", section="state", store=store
+        )["State"]
+        assert new.lower() == state.lower()
     finally:
-        ret = win_lgpo_netsh.set_logging_settings(
-            profile="domain", setting="maxfilesize", value=current, store="local"
-        )
-        assert ret is True
+        win_lgpo_netsh.set_state(profile="domain", state=current_state, store=store)


 @pytest.mark.destructive_test
-def test_set_firewall_settings_fwrules_local_enable():
-    pytest.raises(
-        CommandExecutionError,
-        win_lgpo_netsh.set_settings,
-        profile="domain",
-        setting="localfirewallrules",
-        value="enable",
-        store="local",
-    )
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_settings_fwrules_lgpo_notconfigured():
-    current = win_lgpo_netsh.get_settings(
+@pytest.mark.parametrize("store", ["local", "lgpo"])
+@pytest.mark.parametrize("allow_inbound", ["enable", "disable"])
+@pytest.mark.parametrize("state", ["on", "off", "notconfigured"])
+def test_set_firewall_state_allow_inbound(store, allow_inbound, state):
+    current_state = win_lgpo_netsh.get_settings(
+        profile="domain", section="state", store=store
+    )["State"]
+    current_local_fw_rules = win_lgpo_netsh.get_settings(
         profile="domain", section="settings", store="lgpo"
     )["LocalFirewallRules"]
     try:
         ret = win_lgpo_netsh.set_settings(
             profile="domain",
             setting="localfirewallrules",
-            value="notconfigured",
-            store="lgpo",
+            value=allow_inbound,
+            store=store,
         )
         assert ret is True
         new = win_lgpo_netsh.get_settings(
-            profile="domain", section="settings", store="lgpo"
+            profile="domain", section="settings", store=store
         )["LocalFirewallRules"]
-        assert new == "NotConfigured"
-    finally:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="localfirewallrules",
-            value=current,
-            store="lgpo",
-        )
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_settings_consecrules_local_enable():
-    pytest.raises(
-        CommandExecutionError,
-        win_lgpo_netsh.set_settings,
-        profile="domain",
-        setting="localconsecrules",
-        value="enable",
-        store="local",
-    )
-
-
-def test_set_firewall_settings_notification_local_enable():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="settings", store="local"
-    )["InboundUserNotification"]
-    try:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="inboundusernotification",
-            value="enable",
-            store="local",
-        )
+        assert new.lower() == allow_inbound.lower()
+        ret = win_lgpo_netsh.set_state(profile="domain", state=state, store=store)
         assert ret is True
         new = win_lgpo_netsh.get_settings(
-            profile="domain", section="settings", store="local"
-        )["InboundUserNotification"]
-        assert new == "Enable"
-    finally:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="inboundusernotification",
-            value=current,
-            store="local",
-        )
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_settings_notification_local_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="settings", store="local"
-    )["InboundUserNotification"]
-    try:
-        pytest.raises(
-            CommandExecutionError,
-            win_lgpo_netsh.set_settings,
-            profile="domain",
-            setting="inboundusernotification",
-            value="notconfigured",
-            store="local",
-        )
-    finally:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="inboundusernotification",
-            value=current,
-            store="local",
-        )
-        assert ret is True
-
-
-def test_set_firewall_settings_notification_lgpo_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="settings", store="lgpo"
-    )["InboundUserNotification"]
-    try:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="inboundusernotification",
-            value="notconfigured",
-            store="lgpo",
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="settings", store="lgpo"
-        )["InboundUserNotification"]
-        assert new == "NotConfigured"
-    finally:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="inboundusernotification",
-            value=current,
-            store="lgpo",
-        )
-        assert ret is True
-
-
-def test_set_firewall_settings_unicast_local_disable():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="settings", store="local"
-    )["UnicastResponseToMulticast"]
-    try:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="unicastresponsetomulticast",
-            value="disable",
-            store="local",
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="settings", store="local"
-        )["UnicastResponseToMulticast"]
-        assert new == "Disable"
-    finally:
-        ret = win_lgpo_netsh.set_settings(
-            profile="domain",
-            setting="unicastresponsetomulticast",
-            value=current,
-            store="local",
-        )
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_state_local_on():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="state", store="local"
-    )["State"]
-    try:
-        ret = win_lgpo_netsh.set_state(profile="domain", state="off", store="local")
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="state", store="local"
+            profile="domain", section="state", store=store
         )["State"]
-        assert new == "OFF"
+        assert new.lower() == state.lower()
     finally:
-        ret = win_lgpo_netsh.set_state(profile="domain", state=current, store="local")
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_state_local_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="state", store="local"
-    )["State"]
-    try:
-        ret = win_lgpo_netsh.set_state(
-            profile="domain",
-            state="notconfigured",
-            store="local",
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="state", store="local"
-        )["State"]
-        assert new == "NotConfigured"
-    finally:
-        ret = win_lgpo_netsh.set_state(profile="domain", state=current, store="local")
-        assert ret is True
-
-
-@pytest.mark.destructive_test
-def test_set_firewall_state_lgpo_notconfigured():
-    current = win_lgpo_netsh.get_settings(
-        profile="domain", section="state", store="local"
-    )["State"]
-    try:
-        ret = win_lgpo_netsh.set_state(
-            profile="domain", state="notconfigured", store="lgpo"
-        )
-        assert ret is True
-        new = win_lgpo_netsh.get_settings(
-            profile="domain", section="state", store="lgpo"
-        )["State"]
-        assert new == "NotConfigured"
-    finally:
-        ret = win_lgpo_netsh.set_state(profile="domain", state=current, store="lgpo")
-        assert ret is True
+        if current_local_fw_rules.lower() != "notconfigured":
+            win_lgpo_netsh.set_settings(
+                profile="domain",
+                setting="localfirewallrules",
+                value=current_local_fw_rules,
+                store=store,
+            )
+        win_lgpo_netsh.set_state(profile="domain", state=current_state, store=store)
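The refactor above collapses near-duplicate `_local`/`_lgpo` test bodies into single functions driven by stacked `pytest.mark.parametrize` decorators; pytest expands stacked decorators into the cartesian product of their values, so one body covers every store/value combination. A minimal standalone sketch of that expansion (the names here are illustrative):

```python
import pytest


@pytest.mark.parametrize("store", ["local", "lgpo"])
@pytest.mark.parametrize("value", ["enable", "disable", "notconfigured"])
def test_matrix(store, value):
    # pytest generates 2 x 3 = 6 test cases from the two decorators,
    # one per (store, value) combination.
    assert store in ("local", "lgpo")
    assert value in ("enable", "disable", "notconfigured")
```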
873
tools/ci.py
@ -349,198 +349,6 @@ class TestRun(TypedDict):
|
||||||
selected_tests: NotRequired[dict[str, bool]]
|
selected_tests: NotRequired[dict[str, bool]]
|
||||||
|
|
||||||
|
|
||||||
@ci.command(
|
|
||||||
name="define-testrun",
|
|
||||||
arguments={
|
|
||||||
"event_name": {
|
|
||||||
"help": "The name of the GitHub event being processed.",
|
|
||||||
},
|
|
||||||
"changed_files": {
|
|
||||||
"help": (
|
|
||||||
"Path to '.json' file containing the payload of changed files "
|
|
||||||
"from the 'dorny/paths-filter' GitHub action."
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
def define_testrun(ctx: Context, event_name: str, changed_files: pathlib.Path):
|
|
||||||
"""
|
|
||||||
Set GH Actions outputs for what and how Salt should be tested.
|
|
||||||
"""
|
|
||||||
github_output = os.environ.get("GITHUB_OUTPUT")
|
|
||||||
if github_output is None:
|
|
||||||
ctx.warn("The 'GITHUB_OUTPUT' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert github_output is not None
|
|
||||||
|
|
||||||
github_step_summary = os.environ.get("GITHUB_STEP_SUMMARY")
|
|
||||||
if github_step_summary is None:
|
|
||||||
ctx.warn("The 'GITHUB_STEP_SUMMARY' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert github_step_summary is not None
|
|
||||||
|
|
||||||
labels: list[str] = []
|
|
||||||
gh_event_path = os.environ.get("GITHUB_EVENT_PATH") or None
|
|
||||||
if gh_event_path is not None:
|
|
||||||
try:
|
|
||||||
gh_event = json.loads(open(gh_event_path, encoding="utf-8").read())
|
|
||||||
except Exception as exc:
|
|
||||||
ctx.error(
|
|
||||||
f"Could not load the GH Event payload from {gh_event_path!r}:\n", exc # type: ignore[arg-type]
|
|
||||||
)
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
labels.extend(
|
|
||||||
label[0] for label in _get_pr_test_labels_from_event_payload(gh_event)
|
|
||||||
)
|
|
||||||
|
|
||||||
if "test:coverage" in labels:
|
|
||||||
ctx.info("Writing 'testrun' to the github outputs file")
|
|
||||||
# skip running code coverage for now, was False
|
|
||||||
testrun = TestRun(type="full", skip_code_coverage=True)
|
|
||||||
with open(github_output, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"testrun={json.dumps(testrun)}\n")
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(
|
|
||||||
"Full test run chosen because the label `test:coverage` is set.\n"
|
|
||||||
)
|
|
||||||
return
|
|
||||||
elif event_name != "pull_request":
|
|
||||||
# In this case, a full test run is in order
|
|
||||||
ctx.info("Writing 'testrun' to the github outputs file")
|
|
||||||
# skip running code coverage for now, was False
|
|
||||||
testrun = TestRun(type="full", skip_code_coverage=True)
|
|
||||||
with open(github_output, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"testrun={json.dumps(testrun)}\n")
|
|
||||||
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"Full test run chosen due to event type of `{event_name}`.\n")
|
|
||||||
return
|
|
||||||
|
|
||||||
# So, it's a pull request...
|
|
||||||
|
|
||||||
if not changed_files.exists():
|
|
||||||
ctx.error(f"The '{changed_files}' file does not exist.")
|
|
||||||
ctx.error(
|
|
||||||
"FYI, the command 'tools process-changed-files <changed-files-path>' "
|
|
||||||
"needs to run prior to this one."
|
|
||||||
)
|
|
||||||
ctx.exit(1)
|
|
||||||
try:
|
|
||||||
changed_files_contents = json.loads(changed_files.read_text())
|
|
||||||
except Exception as exc:
|
|
||||||
ctx.error(f"Could not load the changed files from '{changed_files}': {exc}")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
# Based on which files changed, or other things like PR labels we can
|
|
||||||
# decide what to run, or even if the full test run should be running on the
|
|
||||||
# pull request, etc...
|
|
||||||
changed_pkg_requirements_files = json.loads(
|
|
||||||
changed_files_contents["pkg_requirements_files"]
|
|
||||||
)
|
|
||||||
changed_test_requirements_files = json.loads(
|
|
||||||
changed_files_contents["test_requirements_files"]
|
|
||||||
)
|
|
||||||
if changed_files_contents["golden_images"] == "true":
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(
|
|
||||||
"Full test run chosen because there was a change made "
|
|
||||||
"to `cicd/golden-images.json`.\n"
|
|
||||||
)
|
|
||||||
testrun = TestRun(type="full", skip_code_coverage=True)
|
|
||||||
elif changed_pkg_requirements_files or changed_test_requirements_files:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(
|
|
||||||
"Full test run chosen because there was a change made "
|
|
||||||
"to the requirements files.\n"
|
|
||||||
)
|
|
||||||
wfh.write(
|
|
||||||
"<details>\n<summary>Changed Requirements Files (click me)</summary>\n<pre>\n"
|
|
||||||
)
|
|
||||||
for path in sorted(
|
|
||||||
changed_pkg_requirements_files + changed_test_requirements_files
|
|
||||||
):
|
|
||||||
wfh.write(f"{path}\n")
|
|
||||||
wfh.write("</pre>\n</details>\n")
|
|
||||||
testrun = TestRun(type="full", skip_code_coverage=True)
|
|
||||||
elif "test:full" in labels:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("Full test run chosen because the label `test:full` is set.\n")
|
|
||||||
testrun = TestRun(type="full", skip_code_coverage=True)
|
|
||||||
else:
|
|
||||||
testrun_changed_files_path = tools.utils.REPO_ROOT / "testrun-changed-files.txt"
|
|
||||||
testrun = TestRun(
|
|
||||||
type="changed",
|
|
||||||
skip_code_coverage=True,
|
|
||||||
from_filenames=str(
|
|
||||||
testrun_changed_files_path.relative_to(tools.utils.REPO_ROOT)
|
|
||||||
),
|
|
||||||
)
|
|
||||||
ctx.info(f"Writing {testrun_changed_files_path.name} ...")
|
|
||||||
selected_changed_files = []
|
|
||||||
for fpath in json.loads(changed_files_contents["testrun_files"]):
|
|
||||||
if fpath.startswith(("tools/", "tasks/")):
|
|
||||||
continue
|
|
||||||
if fpath in ("noxfile.py",):
|
|
||||||
continue
|
|
||||||
if fpath == "tests/conftest.py":
|
|
||||||
# In this particular case, just run the full test suite
|
|
||||||
testrun["type"] = "full"
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(
|
|
||||||
f"Full test run chosen because there was a change to `{fpath}`.\n"
|
|
||||||
)
|
|
||||||
selected_changed_files.append(fpath)
|
|
||||||
testrun_changed_files_path.write_text("\n".join(sorted(selected_changed_files)))
|
|
||||||
if testrun["type"] == "changed":
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("Partial test run chosen.\n")
|
|
||||||
testrun["selected_tests"] = {
|
|
||||||
"core": False,
|
|
||||||
"slow": False,
|
|
||||||
"fast": True,
|
|
||||||
"flaky": False,
|
|
||||||
}
|
|
||||||
if "test:slow" in labels:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("Slow tests chosen by `test:slow` label.\n")
|
|
||||||
testrun["selected_tests"]["slow"] = True
|
|
||||||
if "test:core" in labels:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("Core tests chosen by `test:core` label.\n")
|
|
||||||
testrun["selected_tests"]["core"] = True
|
|
||||||
if "test:no-fast" in labels:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("Fast tests deselected by `test:no-fast` label.\n")
|
|
||||||
testrun["selected_tests"]["fast"] = False
|
|
||||||
if "test:flaky-jail" in labels:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("Flaky jailed tests chosen by `test:flaky-jail` label.\n")
|
|
||||||
testrun["selected_tests"]["flaky"] = True
|
|
||||||
if selected_changed_files:
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(
|
|
||||||
"<details>\n<summary>Selected Changed Files (click me)</summary>\n<pre>\n"
|
|
||||||
)
|
|
||||||
for path in sorted(selected_changed_files):
|
|
||||||
wfh.write(f"{path}\n")
|
|
||||||
wfh.write("</pre>\n</details>\n")
|
|
||||||
|
|
||||||
with open(github_step_summary, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write("<details>\n<summary>All Changed Files (click me)</summary>\n<pre>\n")
|
|
||||||
for path in sorted(json.loads(changed_files_contents["repo_files"])):
|
|
||||||
wfh.write(f"{path}\n")
|
|
||||||
wfh.write("</pre>\n</details>\n")
|
|
||||||
|
|
||||||
ctx.info("Writing 'testrun' to the github outputs file:\n", testrun)
|
|
||||||
with open(github_output, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"testrun={json.dumps(testrun)}\n")
|
|
||||||
|
|
||||||
|
|
||||||
def _build_matrix(os_kind, linux_arm_runner):
|
def _build_matrix(os_kind, linux_arm_runner):
|
||||||
"""
|
"""
|
||||||
Generate matrix for build ci/cd steps.
|
Generate matrix for build ci/cd steps.
|
||||||
|
@ -558,466 +366,6 @@ def _build_matrix(os_kind, linux_arm_runner):
|
||||||
return _matrix
|
return _matrix
|
||||||
|
|
||||||
|
|
||||||
@ci.command(
|
|
||||||
arguments={
|
|
||||||
"distro_slug": {
|
|
||||||
"help": "The distribution slug to generate the matrix for",
|
|
||||||
},
|
|
||||||
"full": {
|
|
||||||
"help": "Full test run",
|
|
||||||
},
|
|
||||||
"workflow": {
|
|
||||||
"help": "Which workflow is running",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
def matrix(
|
|
||||||
ctx: Context,
|
|
||||||
distro_slug: str,
|
|
||||||
full: bool = False,
|
|
||||||
workflow: str = "ci",
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Generate the test matrix.
|
|
||||||
"""
|
|
||||||
gh_event_path = os.environ.get("GITHUB_EVENT_PATH") or None
|
|
||||||
if gh_event_path is None:
|
|
||||||
ctx.warn("The 'GITHUB_EVENT_PATH' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert gh_event_path is not None
|
|
||||||
|
|
||||||
gh_event = None
|
|
||||||
try:
|
|
||||||
gh_event = json.loads(open(gh_event_path, encoding="utf-8").read())
|
|
||||||
except Exception as exc:
|
|
||||||
ctx.error(f"Could not load the GH Event payload from {gh_event_path!r}:\n", exc)
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert gh_event is not None
|
|
||||||
|
|
||||||
_matrix = []
|
|
||||||
_splits = {
|
|
||||||
"functional": 4,
|
|
||||||
"integration": 7,
|
|
||||||
"scenarios": 1,
|
|
||||||
"unit": 4,
|
|
||||||
}
|
|
||||||
for transport in ("zeromq", "tcp"):
|
|
||||||
if transport == "tcp":
|
|
||||||
if distro_slug not in (
|
|
||||||
"rockylinux-9",
|
|
||||||
"rockylinux-9-arm64",
|
|
||||||
"photonos-5",
|
|
||||||
"photonos-5-arm64",
|
|
||||||
"ubuntu-22.04",
|
|
||||||
"ubuntu-22.04-arm64",
|
|
||||||
):
|
|
||||||
# Only run TCP transport tests on these distributions
|
|
||||||
continue
|
|
||||||
for chunk in ("unit", "functional", "integration", "scenarios"):
|
|
||||||
if transport == "tcp" and chunk in ("unit", "functional"):
|
|
||||||
# Only integration and scenarios shall be tested under TCP,
|
|
||||||
# the rest would be repeating tests
|
|
||||||
continue
|
|
||||||
if "macos" in distro_slug and chunk == "scenarios":
|
|
||||||
continue
|
|
||||||
splits = _splits.get(chunk) or 1
|
|
||||||
if full and splits > 1:
|
|
||||||
for split in range(1, splits + 1):
|
|
||||||
_matrix.append(
|
|
||||||
{
|
|
||||||
"transport": transport,
|
|
||||||
"tests-chunk": chunk,
|
|
||||||
"test-group": split,
|
|
||||||
"test-group-count": splits,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
_matrix.append({"transport": transport, "tests-chunk": chunk})
|
|
||||||
|
|
||||||
ctx.info("Generated matrix:")
|
|
||||||
if not _matrix:
|
|
||||||
ctx.print(" * `None`")
|
|
||||||
else:
|
|
||||||
for entry in _matrix:
|
|
||||||
ctx.print(" * ", entry, soft_wrap=True)
|
|
||||||
|
|
||||||
if (
|
|
||||||
gh_event["repository"]["fork"] is True
|
|
||||||
and "macos" in distro_slug
|
|
||||||
and "arm64" in distro_slug
|
|
||||||
):
|
|
||||||
ctx.warn("Forks don't have access to MacOS 13 Arm64. Clearning the matrix.")
|
|
||||||
_matrix.clear()
|
|
||||||
|
|
||||||
if not _matrix:
|
|
||||||
build_reports = False
|
|
||||||
ctx.info("Not building reports because the matrix is empty")
|
|
||||||
else:
|
|
||||||
build_reports = True
|
|
||||||
|
|
||||||
github_output = os.environ.get("GITHUB_OUTPUT")
|
|
||||||
if github_output is not None:
|
|
||||||
with open(github_output, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"matrix={json.dumps(_matrix)}\n")
|
|
||||||
wfh.write(f"build-reports={json.dumps(build_reports)}\n")
|
|
||||||
ctx.exit(0)
|
|
||||||
|
|
||||||
|
|
||||||
@ci.command(
    name="pkg-matrix",
    arguments={
        "distro_slug": {
            "help": "The distribution slug to generate the matrix for",
        },
        "pkg_type": {
            "help": "The type of package we are testing against",
        },
        "testing_releases": {
            "help": "The salt releases to test upgrades against",
            "nargs": "+",
            "required": True,
        },
    },
)
def pkg_matrix(
    ctx: Context,
    distro_slug: str,
    pkg_type: str,
    testing_releases: list[tools.utils.Version] = None,
):
    """
    Generate the test matrix.
    """
    gh_event = None
    gh_event_path = os.environ.get("GITHUB_EVENT_PATH") or None
    if gh_event_path is None:
        ctx.warn("The 'GITHUB_EVENT_PATH' variable is not set.")
        ctx.exit(1)

    if TYPE_CHECKING:
        assert gh_event_path is not None

    try:
        gh_event = json.loads(open(gh_event_path, encoding="utf-8").read())
    except Exception as exc:
        ctx.error(f"Could not load the GH Event payload from {gh_event_path!r}:\n", exc)
        ctx.exit(1)

    if TYPE_CHECKING:
        assert gh_event is not None

    github_output = os.environ.get("GITHUB_OUTPUT")
    if github_output is None:
        ctx.warn("The 'GITHUB_OUTPUT' variable is not set.")

    if TYPE_CHECKING:
        assert testing_releases

    adjusted_versions = []
    for ver in testing_releases:
        adjusted_versions.append((ver, "relenv"))
    ctx.info(f"Will look for the following versions: {adjusted_versions}")

    # Filter out the prefixes to look under
    if "macos-" in distro_slug:
        # We don't have golden images for macos, handle these separately
        prefixes = {
            "classic": "osx/",
            "tiamat": "salt/py3/macos/minor/",
            "relenv": "salt/py3/macos/minor/",
        }
        name = "macos"
    else:
        parts = distro_slug.split("-")
        name = parts[0]
        version = parts[1]

        if len(parts) > 2:
            arch = parts[2]
        elif name in ("debian", "ubuntu"):
            arch = "amd64"
        else:
            arch = "x86_64"

        ctx.info(f"Parsed linux slug parts {name} {version} {arch}")

        if name == "amazonlinux":
            name = "amazon"
        elif name == "rockylinux":
            name = "redhat"
        elif "photon" in name:
            name = "photon"

        if name == "windows":
            prefixes = {
                "classic": "windows/",
                "tiamat": "salt/py3/windows/minor",
                "relenv": "salt/py3/windows/minor",
            }
        else:
            prefixes = {
                "classic": f"py3/{name}/{version}/{arch}/",
                "tiamat": f"salt/py3/{name}/{version}/{arch}/minor/",
                "relenv": f"salt/py3/{name}/{version}/{arch}/minor/",
            }
    _matrix = []

    # XXX: fetch versions
    # s3 = boto3.client("s3")
    # paginator = s3.get_paginator("list_objects_v2")
    _matrix = [
        {
            "tests-chunk": "install",
            "version": None,
        }
    ]

    for version, backend in adjusted_versions:
        prefix = prefixes[backend]
        # TODO: Remove this after 3009.0
        if backend == "relenv" and version >= tools.utils.Version("3006.5"):
            prefix = prefix.replace("/arm64/", "/aarch64/")
        # Using a paginator allows us to list recursively and avoid the item limit
        # page_iterator = paginator.paginate(
        #     Bucket=f"salt-project-{tools.utils.SPB_ENVIRONMENT}-salt-artifacts-release",
        #     Prefix=prefix,
        # )
        # Uses a jmespath expression to test if the wanted version is in any of the filenames
        # key_filter = f"Contents[?contains(Key, '{version}')][]"
        # if pkg_type == "MSI":
        #     # TODO: Add this back when we add MSI upgrade and downgrade tests
        #     # key_filter = f"Contents[?contains(Key, '{version}')] | [?ends_with(Key, '.msi')]"
        #     continue
        # elif pkg_type == "NSIS":
        #     key_filter = (
        #         f"Contents[?contains(Key, '{version}')] | [?ends_with(Key, '.exe')]"
        #     )
        #     continue
        # objects = list(page_iterator.search(key_filter))
        # Testing using `any` because sometimes the paginator returns `[None]`
        # if any(objects):
        #     ctx.info(
        #         f"Found {version} ({backend}) for {distro_slug}: {objects[0]['Key']}"
        #     )
        #     for session in ("upgrade", "downgrade"):
        #         if backend == "classic":
        #             session += "-classic"
        #         _matrix.append(
        #             {
        #                 "tests-chunk": session,
        #                 "version": str(version),
        #             }
        #         )
        # else:
        #     ctx.info(f"No {version} ({backend}) for {distro_slug} at {prefix}")
        if name == "windows":
            sessions = [
                "upgrade",
            ]
        else:
            sessions = ["upgrade", "downgrade"]
        for session in sessions:
            _matrix.append(
                {
                    "tests-chunk": session,
                    "version": str(version),
                }
            )

    ctx.info("Generated matrix:")
    if not _matrix:
        ctx.print(" * `None`")
    else:
        for entry in _matrix:
            ctx.print(" * ", entry, soft_wrap=True)

    # if (
    #     gh_event["repository"]["fork"] is True
    #     and "macos" in distro_slug
    #     and "arm64" in distro_slug
    # ):
    #     # XXX: This should work now
    #     ctx.warn("Forks don't have access to MacOS 13 Arm64. Clearing the matrix.")
    #     _matrix.clear()

    if (
        arch == "arm64"
        and name not in ["windows", "macos"]
        and os.environ.get("LINUX_ARM_RUNNER", "0") in ("0", "")
    ):
        ctx.warn("This fork does not have a linux arm64 runner configured.")
        _matrix.clear()

    if not _matrix:
        build_reports = False
        ctx.info("Not building reports because the matrix is empty")
    else:
        build_reports = True

    if github_output is not None:
        with open(github_output, "a", encoding="utf-8") as wfh:
            wfh.write(f"matrix={json.dumps(_matrix)}\n")
            wfh.write(f"build-reports={json.dumps(build_reports)}\n")
    ctx.exit(0)

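To make the slug handling above concrete, here is a small sketch (not part of the module) of how a distro slug could map to the artifact prefix `pkg-matrix` searches; `artifact_prefix` is a hypothetical helper written only for this example, mirroring the relenv prefix table above:

```python
# Sketch of the slug -> artifact-prefix mapping used by pkg-matrix above.
# The helper and example slugs are illustrative, not part of tools/ci.py.
def artifact_prefix(distro_slug: str) -> str:
    # macOS artifacts live under a single prefix regardless of version/arch.
    if "macos-" in distro_slug:
        return "salt/py3/macos/minor/"
    parts = distro_slug.split("-")
    name, version = parts[0], parts[1]
    if len(parts) > 2:
        arch = parts[2]
    elif name in ("debian", "ubuntu"):
        arch = "amd64"
    else:
        arch = "x86_64"
    # Normalize distro names the same way pkg-matrix does.
    name = {"amazonlinux": "amazon", "rockylinux": "redhat"}.get(name, name)
    if "photon" in name:
        name = "photon"
    if name == "windows":
        return "salt/py3/windows/minor"
    return f"salt/py3/{name}/{version}/{arch}/minor/"


assert artifact_prefix("ubuntu-22.04") == "salt/py3/ubuntu/22.04/amd64/minor/"
assert artifact_prefix("rockylinux-9-arm64") == "salt/py3/redhat/9/arm64/minor/"
```
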
@ci.command(name="deps-matrix")
|
|
||||||
def get_ci_deps_matrix(ctx: Context):
|
|
||||||
gh_event_path = os.environ.get("GITHUB_EVENT_PATH") or None
|
|
||||||
if gh_event_path is None:
|
|
||||||
ctx.warn("The 'GITHUB_EVENT_PATH' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert gh_event_path is not None
|
|
||||||
|
|
||||||
github_output = os.environ.get("GITHUB_OUTPUT")
|
|
||||||
if github_output is None:
|
|
||||||
ctx.warn("The 'GITHUB_OUTPUT' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert github_output is not None
|
|
||||||
|
|
||||||
gh_event = None
|
|
||||||
try:
|
|
||||||
gh_event = json.loads(open(gh_event_path, encoding="utf-8").read())
|
|
||||||
except Exception as exc:
|
|
||||||
ctx.error(f"Could not load the GH Event payload from {gh_event_path!r}:\n", exc)
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert gh_event is not None
|
|
||||||
|
|
||||||
_matrix = {
|
|
||||||
"linux": [
|
|
||||||
{"arch": "x86_64"},
|
|
||||||
],
|
|
||||||
"macos": [
|
|
||||||
{"distro-slug": "macos-13", "arch": "x86_64"},
|
|
||||||
{"distro-slug": "macos-14", "arch": "arm64"},
|
|
||||||
],
|
|
||||||
"windows": [
|
|
||||||
{"distro-slug": "windows-2022", "arch": "amd64"},
|
|
||||||
],
|
|
||||||
}
|
|
||||||
if os.environ.get("LINUX_ARM_RUNNER", "0") not in ("0", ""):
|
|
||||||
_matrix["linux"].append({"arch": "arm64"})
|
|
||||||
|
|
||||||
ctx.info("Generated matrix:")
|
|
||||||
ctx.print(_matrix, soft_wrap=True)
|
|
||||||
|
|
||||||
if github_output is not None:
|
|
||||||
with open(github_output, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"matrix={json.dumps(_matrix)}\n")
|
|
||||||
ctx.exit(0)
|
|
||||||
|
|
||||||
|
|
||||||
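For reference, the `GITHUB_OUTPUT` writes used by these commands follow the standard `name=value` line format that GitHub Actions reads back; a self-contained sketch of that round trip, where a temporary file stands in for the runner-provided path:

```python
# Sketch: emit and re-read the key=value lines these commands append to
# the file GitHub Actions exposes via $GITHUB_OUTPUT.
import json
import os
import tempfile

matrix = {"linux": [{"arch": "x86_64"}]}

# The temporary file stands in for the runner-provided output file.
with tempfile.NamedTemporaryFile("w", suffix=".out", delete=False) as wfh:
    os.environ["GITHUB_OUTPUT"] = wfh.name
    wfh.write("matrix=" + json.dumps(matrix) + "\n")

# A consumer (normally the workflow engine) parses the lines back.
with open(os.environ["GITHUB_OUTPUT"], encoding="utf-8") as rfh:
    for line in rfh:
        key, _, value = line.partition("=")
        print(key, "->", json.loads(value))
```
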
@ci.command(name="pkg-downloads-matrix")
|
|
||||||
def get_pkg_downloads_matrix(ctx: Context):
|
|
||||||
gh_event_path = os.environ.get("GITHUB_EVENT_PATH") or None
|
|
||||||
if gh_event_path is None:
|
|
||||||
ctx.warn("The 'GITHUB_EVENT_PATH' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert gh_event_path is not None
|
|
||||||
|
|
||||||
github_output = os.environ.get("GITHUB_OUTPUT")
|
|
||||||
if github_output is None:
|
|
||||||
ctx.warn("The 'GITHUB_OUTPUT' variable is not set.")
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert github_output is not None
|
|
||||||
|
|
||||||
gh_event = None
|
|
||||||
try:
|
|
||||||
gh_event = json.loads(open(gh_event_path, encoding="utf-8").read())
|
|
||||||
except Exception as exc:
|
|
||||||
ctx.error(f"Could not load the GH Event payload from {gh_event_path!r}:\n", exc)
|
|
||||||
ctx.exit(1)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
assert gh_event is not None
|
|
||||||
|
|
||||||
_matrix: dict[str, list[dict[str, str]]] = {
|
|
||||||
"linux": [],
|
|
||||||
"macos": [],
|
|
||||||
"windows": [],
|
|
||||||
}
|
|
||||||
|
|
||||||
rpm_slugs = (
|
|
||||||
"rockylinux",
|
|
||||||
"amazonlinux",
|
|
||||||
"fedora",
|
|
||||||
"photon",
|
|
||||||
)
|
|
||||||
linux_skip_pkg_download_tests = (
|
|
||||||
"opensuse-15",
|
|
||||||
"windows",
|
|
||||||
)
|
|
||||||
for slug in sorted(tools.utils.get_golden_images()):
|
|
||||||
if slug.startswith(linux_skip_pkg_download_tests):
|
|
||||||
continue
|
|
||||||
if "arm64" in slug:
|
|
||||||
arch = "arm64"
|
|
||||||
else:
|
|
||||||
arch = "x86_64"
|
|
||||||
if slug.startswith(rpm_slugs) and arch == "arm64":
|
|
||||||
# While we maintain backwards compatible urls
|
|
||||||
_matrix["linux"].append(
|
|
||||||
{"distro-slug": slug, "arch": "aarch64", "pkg-type": "package"}
|
|
||||||
)
|
|
||||||
_matrix["linux"].append(
|
|
||||||
{"distro-slug": slug, "arch": arch, "pkg-type": "package"}
|
|
||||||
)
|
|
||||||
if slug.startswith("ubuntu-22"):
|
|
||||||
_matrix["linux"].append(
|
|
||||||
{"distro-slug": slug, "arch": arch, "pkg-type": "onedir"}
|
|
||||||
)
|
|
||||||
for mac in TEST_SALT_LISTING["macos"]:
|
|
||||||
if gh_event["repository"]["fork"] is True and mac.arch == "arm64":
|
|
||||||
continue
|
|
||||||
_matrix["macos"].append(
|
|
||||||
{"distro-slug": mac.slug, "arch": mac.arch, "pkg-type": "package"}
|
|
||||||
)
|
|
||||||
|
|
||||||
if gh_event["repository"]["fork"] is True:
|
|
||||||
macos_idx = 0 # macos-12
|
|
||||||
else:
|
|
||||||
macos_idx = 1 # macos-13
|
|
||||||
_matrix["macos"].append(
|
|
||||||
{
|
|
||||||
"distro-slug": TEST_SALT_LISTING["macos"][macos_idx].slug,
|
|
||||||
"arch": TEST_SALT_LISTING["macos"][macos_idx].arch,
|
|
||||||
"pkg-type": "onedir",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
for win in TEST_SALT_LISTING["windows"][-1:]:
|
|
||||||
for pkg_type in ("nsis", "msi", "onedir"):
|
|
||||||
_matrix["windows"].append(
|
|
||||||
{
|
|
||||||
"distro-slug": win.slug,
|
|
||||||
"arch": win.arch,
|
|
||||||
"pkg-type": pkg_type,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
ctx.info("Generated matrix:")
|
|
||||||
ctx.print(_matrix, soft_wrap=True)
|
|
||||||
|
|
||||||
if github_output is not None:
|
|
||||||
with open(github_output, "a", encoding="utf-8") as wfh:
|
|
||||||
wfh.write(f"matrix={json.dumps(_matrix)}\n")
|
|
||||||
ctx.exit(0)
|
|
||||||
|
|
||||||
|
|
||||||
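A toy illustration of the arm64/aarch64 double entry for the RPM distros above; `linux_entries` is a hypothetical helper written only for this example:

```python
# Toy illustration of the arm64/aarch64 double entry for RPM distros above.
rpm_slugs = ("rockylinux", "amazonlinux", "fedora", "photon")


def linux_entries(slug: str) -> list:
    arch = "arm64" if "arm64" in slug else "x86_64"
    entries = []
    if slug.startswith(rpm_slugs) and arch == "arm64":
        # Backwards-compatible repo URLs still use "aarch64".
        entries.append({"distro-slug": slug, "arch": "aarch64", "pkg-type": "package"})
    entries.append({"distro-slug": slug, "arch": arch, "pkg-type": "package"})
    return entries


print(linux_entries("rockylinux-9-arm64"))  # two entries: aarch64 + arm64
print(linux_entries("debian-12"))           # one x86_64 entry
```
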
@ci.command(
    name="get-releases",
    arguments={

@@ -1530,10 +878,12 @@ def upload_coverage(ctx: Context, reports_path: pathlib.Path, commit_sha: str =
    ctx.exit(0)


-def _os_test_filter(osdef, transport, chunk, arm_runner):
+def _os_test_filter(osdef, transport, chunk, arm_runner, requested_slugs):
    """
    Filter out some test runs based on os, transport and chunk to be run.
    """
+    if osdef.slug not in requested_slugs:
+        return False
    if transport == "tcp" and chunk in ("unit", "functional"):
        return False
    if "macos" in osdef.slug and chunk == "scenarios":
@@ -1552,6 +902,160 @@ def _os_test_filter(osdef, transport, chunk, arm_runner):
    return True

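A quick sketch of how the updated filter behaves; the `OSDef` dataclass is an assumed stand-in for the listing objects, and the arm-runner check elided by the hunk above is omitted here:

```python
# Assumed stand-in for the os definition objects the filter receives.
from dataclasses import dataclass


@dataclass
class OSDef:
    slug: str
    arch: str = "x86_64"


def os_test_filter(osdef, transport, chunk, requested_slugs):
    # Mirrors the rules visible in the diff above (arm-runner check omitted).
    if osdef.slug not in requested_slugs:
        return False
    if transport == "tcp" and chunk in ("unit", "functional"):
        return False
    if "macos" in osdef.slug and chunk == "scenarios":
        return False
    return True


ubuntu = OSDef("ubuntu-24.04")
print(os_test_filter(ubuntu, "tcp", "unit", ["ubuntu-24.04"]))     # False
print(os_test_filter(ubuntu, "zeromq", "unit", ["ubuntu-24.04"]))  # True
print(os_test_filter(ubuntu, "zeromq", "unit", []))                # False
```
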
+
+def _define_testrun(ctx, changed_files, labels, full):
+    if not changed_files.exists():
+        ctx.error(f"The '{changed_files}' file does not exist.")
+        ctx.error(
+            "FYI, the command 'tools process-changed-files <changed-files-path>' "
+            "needs to run prior to this one."
+        )
+        ctx.exit(1)
+    try:
+        changed_files_contents = json.loads(changed_files.read_text())
+    except Exception as exc:
+        ctx.error(f"Could not load the changed files from '{changed_files}': {exc}")
+        ctx.exit(1)
+
+    # Based on which files changed, or other things like PR labels, we can
+    # decide what to run, or even if the full test run should be running on the
+    # pull request, etc...
+    changed_pkg_requirements_files: list[str] = []
+    changed_test_requirements_files: list[str] = []
+    if "pkg_requirements_files" in changed_files_contents:
+        changed_pkg_requirements_files = json.loads(
+            changed_files_contents["pkg_requirements_files"]
+        )
+    if "test_requirements_files" in changed_files_contents:
+        changed_test_requirements_files = json.loads(
+            changed_files_contents["test_requirements_files"]
+        )
+    if full:
+        ctx.info("Full test run chosen")
+        testrun = TestRun(type="full", skip_code_coverage=True)
+    elif changed_pkg_requirements_files or changed_test_requirements_files:
+        ctx.info(
+            "Full test run chosen because there was a change made "
+            "to the requirements files."
+        )
+        testrun = TestRun(type="full", skip_code_coverage=True)
+    elif "test:full" in labels:
+        ctx.info("Full test run chosen because the label `test:full` is set.\n")
+        testrun = TestRun(type="full", skip_code_coverage=True)
+    else:
+        testrun_changed_files_path = tools.utils.REPO_ROOT / "testrun-changed-files.txt"
+        testrun = TestRun(
+            type="changed",
+            skip_code_coverage=True,
+            from_filenames=str(
+                testrun_changed_files_path.relative_to(tools.utils.REPO_ROOT)
+            ),
+        )
+        ctx.info(f"Writing {testrun_changed_files_path.name} ...")
+        selected_changed_files = []
+        for fpath in json.loads(changed_files_contents["testrun_files"]):
+            if fpath.startswith(("tools/", "tasks/")):
+                continue
+            if fpath in ("noxfile.py",):
+                continue
+            if fpath == "tests/conftest.py":
+                # In this particular case, just run the full test suite
+                testrun["type"] = "full"
+                ctx.info(
+                    f"Full test run chosen because there was a change to `{fpath}`."
+                )
+            selected_changed_files.append(fpath)
+        testrun_changed_files_path.write_text("\n".join(sorted(selected_changed_files)))
+    if testrun["type"] == "changed":
+        testrun["selected_tests"] = {
+            "core": False,
+            "slow": False,
+            "fast": True,
+            "flaky": False,
+        }
+        if "test:slow" in labels:
+            ctx.info("Slow tests chosen by `test:slow` label.")
+            testrun["selected_tests"]["slow"] = True
+        if "test:core" in labels:
+            ctx.info("Core tests chosen by `test:core` label.")
+            testrun["selected_tests"]["core"] = True
+        if "test:no-fast" in labels:
+            ctx.info("Fast tests deselected by `test:no-fast` label.")
+            testrun["selected_tests"]["fast"] = False
+        if "test:flaky-jail" in labels:
+            ctx.info("Flaky jailed tests chosen by `test:flaky-jail` label.")
+            testrun["selected_tests"]["flaky"] = True
+    return testrun

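Assuming `TestRun` behaves like the mapping used above, the label-driven selection can be exercised in isolation with a hypothetical `select_tests` helper:

```python
# Sketch of the label-driven test selection above, treating the TestRun
# fields as a plain dict; select_tests is a hypothetical helper for
# illustration only.
def select_tests(labels: list) -> dict:
    selected = {"core": False, "slow": False, "fast": True, "flaky": False}
    if "test:slow" in labels:
        selected["slow"] = True
    if "test:core" in labels:
        selected["core"] = True
    if "test:no-fast" in labels:
        selected["fast"] = False
    if "test:flaky-jail" in labels:
        selected["flaky"] = True
    return selected


print(select_tests(["test:slow", "test:no-fast"]))
# {'core': False, 'slow': True, 'fast': False, 'flaky': False}
```
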
+
+def _environment_slugs(ctx, slugdef, labels):
+    """
+    Based on a slugs definition from our environment and labels for a PR,
+    return the requested slugs for a testrun.
+
+    Environment slug definitions can be a comma separated list. An "all" item
+    in the list will include all os and package slugs.
+    """
+    if isinstance(slugdef, list):
+        requests = slugdef
+    else:
+        requests = [_.strip().lower() for _ in slugdef.split(",") if _.strip()]
+    label_requests = [
+        _[0].rsplit(":", 1)[1] for _ in labels if _[0].startswith("test:os:")
+    ]
+    all_slugs = []
+    slugs = set()
+    for platform in TEST_SALT_LISTING:
+        for osdef in TEST_SALT_LISTING[platform]:
+            all_slugs.append(osdef.slug)
+    for platform in TEST_SALT_PKG_LISTING:
+        for osdef in TEST_SALT_PKG_LISTING[platform]:
+            all_slugs.append(osdef.slug)
+    if "all" in requests:
+        slugs = set(all_slugs)
+        requests.remove("all")
+    if "all" in label_requests:
+        slugs = set(all_slugs)
+        label_requests.remove("all")
+    for request in requests[:]:
+        if request.startswith("+"):
+            request = request.strip("+")
+            if request not in all_slugs:
+                ctx.warn(f"invalid slug name from environment {request}")
+                continue
+            if request in slugs:
+                ctx.info(f"slug already requested from environment {request}")
+                continue
+            slugs.add(request)
+        elif request.startswith("-"):
+            request = request.strip("-")
+            if request not in all_slugs:
+                ctx.warn(f"invalid slug name from environment {request}")
+                continue
+            if request in slugs:
+                slugs.remove(request)
+            else:
+                ctx.info(f"slug from environment was never requested {request}")
+        else:
+            if request not in all_slugs:
+                ctx.warn(f"invalid slug name from environment {request}")
+                continue
+            if request in slugs:
+                ctx.info(f"slug from environment already requested {request}")
+                continue
+            slugs.add(request)
+
+    for label in label_requests:
+        if label not in all_slugs:
+            ctx.warn(f"invalid slug name from label {label}")
+            continue
+        if label in slugs:
+            ctx.info(f"slug from labels already requested {label}")
+            continue
+        slugs.add(label)
+
+    return list(slugs)

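Given the parsing rules above, a definition such as `all,-windows-2022` selects every known slug except `windows-2022`; a simplified sketch with an assumed slug universe (validation and logging omitted):

```python
# Simplified sketch of the slug-definition parsing above.
# The slug universe is assumed for the example only.
all_slugs = ["ubuntu-24.04", "debian-12", "windows-2022", "macos-14"]


def resolve(slugdef: str) -> set:
    requests = [_.strip().lower() for _ in slugdef.split(",") if _.strip()]
    slugs: set = set()
    if "all" in requests:
        slugs = set(all_slugs)
        requests.remove("all")
    for request in requests:
        if request.startswith("+"):
            slugs.add(request.strip("+"))
        elif request.startswith("-"):
            slugs.discard(request.strip("-"))
        else:
            slugs.add(request)
    return slugs


print(sorted(resolve("all,-windows-2022")))  # everything but windows-2022
print(sorted(resolve("+ubuntu-24.04")))      # ['ubuntu-24.04']
```
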
@ci.command(
    name="workflow-config",
    arguments={
@@ -1589,19 +1093,17 @@ def workflow_config(
):
    full = False
    gh_event_path = os.environ.get("GITHUB_EVENT_PATH") or None
-    gh_event = None
+    gh_event: dict[str, Any] = {}
    config: dict[str, Any] = {}
+    labels: list[tuple[str, str]] = []
+    slugs: list[str] = []

    ctx.info(f"{'==== environment ====':^80s}")
    ctx.info(f"{pprint.pformat(dict(os.environ))}")
    ctx.info(f"{'==== end environment ====':^80s}")
    ctx.info(f"Github event path is {gh_event_path}")

-    if event_name != "pull_request":
-        full = True
-
    if gh_event_path is None:
-        labels = []
        config["linux_arm_runner"] = ""
    else:
        try:
@@ -1616,7 +1118,6 @@ def workflow_config(
            pr = gh_event["pull_request"]["number"]
            labels = _get_pr_test_labels_from_event_payload(gh_event)
        else:
-            labels = []
            ctx.warn("The 'pull_request' key was not found on the event payload.")

        if gh_event["repository"]["private"]:
@@ -1630,14 +1131,44 @@ def workflow_config(
            # Public repositories can use github's arm64 runners.
            config["linux_arm_runner"] = "ubuntu-24.04-arm"

+    if event_name != "pull_request" or "test:full" in [_[0] for _ in labels]:
+        full = True
+        requested_slugs = _environment_slugs(
+            ctx,
+            tools.utils.get_cicd_shared_context()["full-testrun-slugs"],
+            labels,
+        )
+    else:
+        requested_slugs = _environment_slugs(
+            ctx,
+            tools.utils.get_cicd_shared_context()["pr-testrun-slugs"],
+            labels,
+        )
+
+    ctx.info(f"{'==== requested slugs ====':^80s}")
+    ctx.info(f"{pprint.pformat(requested_slugs)}")
+    ctx.info(f"{'==== end requested slugs ====':^80s}")

    ctx.info(f"{'==== labels ====':^80s}")
    ctx.info(f"{pprint.pformat(labels)}")
    ctx.info(f"{'==== end labels ====':^80s}")

+    config["skip_code_coverage"] = True
+    if "test:coverage" in labels:
+        config["skip_code_coverage"] = False
+    else:
+        ctx.info("Skipping code coverage.")

    ctx.info(f"{'==== github event ====':^80s}")
    ctx.info(f"{pprint.pformat(gh_event)}")
    ctx.info(f"{'==== end github event ====':^80s}")

+    config["testrun"] = _define_testrun(ctx, changed_files, labels, full)
+
+    ctx.info(f"{'==== testrun ====':^80s}")
+    ctx.info(f"{pprint.pformat(config['testrun'])}")
+    ctx.info(f"{'==== end testrun ====':^80s}")

    jobs = {
        "lint": True,
        "test": True,
@@ -1649,7 +1180,7 @@ def workflow_config(
        "build-deps-onedir": True,
        "build-salt-onedir": True,
        "build-pkgs": True,
-        "build-deps-ci": True,
+        "build-deps-ci": True if requested_slugs else False,
    }

    platforms: list[Literal["linux", "macos", "windows"]] = [
@@ -1730,6 +1261,7 @@ def workflow_config(
                **_.as_dict(),
            )
            for _ in TEST_SALT_PKG_LISTING[platform]
+            if _.slug in requested_slugs
        ]
    for version in str_releases:
        for platform in platforms:
@@ -1742,6 +1274,7 @@ def workflow_config(
                **_.as_dict(),
            )
            for _ in TEST_SALT_PKG_LISTING[platform]
+            if _.slug in requested_slugs
        ]
        # Skipping downgrade tests on windows. These tests have never
        # been run and currently fail. This should be fixed.
@@ -1756,6 +1289,7 @@ def workflow_config(
                **_.as_dict(),
            )
            for _ in TEST_SALT_PKG_LISTING[platform]
+            if _.slug in requested_slugs
        ]
    ctx.info(f"{'==== pkg test matrix ====':^80s}")
    ctx.info(f"{pprint.pformat(pkg_test_matrix)}")
@@ -1793,7 +1327,11 @@ def workflow_config(
            )
            for _ in TEST_SALT_LISTING[platform]
            if _os_test_filter(
-                _, transport, chunk, config["linux_arm_runner"]
+                _,
+                transport,
+                chunk,
+                config["linux_arm_runner"],
+                requested_slugs,
            )
        ]
    else:
@@ -1816,6 +1354,7 @@ def workflow_config(
                transport,
                chunk,
                config["linux_arm_runner"],
+                requested_slugs,
            )
            and _.arch == arch
        ]
@@ -1830,7 +1369,11 @@ def workflow_config(
            )
            for _ in TEST_SALT_LISTING[platform]
            if _os_test_filter(
-                _, transport, chunk, config["linux_arm_runner"]
+                _,
+                transport,
+                chunk,
+                config["linux_arm_runner"],
+                requested_slugs,
            )
        ]
    else:
@@ -1844,7 +1387,11 @@ def workflow_config(
            )
            for _ in TEST_SALT_LISTING[platform]
            if _os_test_filter(
-                _, transport, chunk, config["linux_arm_runner"]
+                _,
+                transport,
+                chunk,
+                config["linux_arm_runner"],
+                requested_slugs,
            )
            and _.arch == arch
        ]