Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)

Merge pull request #63908 from s0undt3ch/hotfix/merge-forward

Merge 3006.x into master

Commit b6b6ccde7b

22 changed files with 230 additions and 291 deletions
.github/workflows/nightly.yml (vendored, 4 changes)
@@ -55,7 +55,7 @@ jobs:
   trigger-branch-nightly-builds:
     name: Trigger Branch Workflows
-    if: ${{ github.event_name == 'schedule' }}
+    if: ${{ github.event_name == 'schedule' && fromJSON(needs.workflow-requirements.outputs.requirements-met) }}
     runs-on:
       - ubuntu-latest
     needs:
@@ -1916,7 +1916,7 @@ jobs:
       - name: Upload Repository Contents (nightly)
        run: |
-          tools pkg repo publish nightly artifacts/pkgs/repo/
+          tools pkg repo publish nightly --salt-version=${{ needs.prepare-workflow.outputs.salt-version }} artifacts/pkgs/repo/

   set-pipeline-exit-status:
     # This step is just so we can make github require this step, to pass checks
.github/workflows/release.yml (vendored, 4 changes)
@@ -140,9 +140,7 @@ jobs:
       - name: Publish Release Repository
        run: |
-          tools pkg repo publish release \
-            ${{ contains(needs.prepare-workflow.outputs.salt-version, 'rc') && '--rc-build' || '' }} \
-            --key-id=64CBBC8173D76B3F ${{ needs.prepare-workflow.outputs.salt-version }}
+          tools pkg repo publish release ${{ needs.prepare-workflow.outputs.salt-version }}

   release:
     name: Release v${{ needs.prepare-workflow.outputs.salt-version }}
.github/workflows/scheduled.yml (vendored, 2 changes)
@@ -54,7 +54,7 @@ jobs:
   trigger-branch-scheduled-builds:
     name: Trigger Branch Workflows
-    if: ${{ github.event_name == 'schedule' }}
+    if: ${{ github.event_name == 'schedule' && fromJSON(needs.workflow-requirements.outputs.requirements-met) }}
     runs-on:
       - ubuntu-latest
     needs:
.github/workflows/staging.yml (vendored, 3 changes)
@@ -1893,8 +1893,7 @@ jobs:
       - name: Upload Repository Contents (staging)
        run: |
-          tools pkg repo publish staging \
-            ${{ contains(needs.prepare-workflow.outputs.salt-version, 'rc') && '--rc-build' || '' }} artifacts/pkgs/repo/
+          tools pkg repo publish staging --salt-version=${{ needs.prepare-workflow.outputs.salt-version }} artifacts/pkgs/repo/

   upload-release-artifacts:
     name: Upload Release Artifacts
(workflow template, file name not shown)

@@ -98,9 +98,6 @@ concurrency:
       - name: Upload Repository Contents (<{ gh_environment }>)
        run: |
-          tools pkg repo publish <{ gh_environment }>
-          <%- if gh_environment in ("staging", "release") %> \
-            ${{ contains(needs.prepare-workflow.outputs.salt-version, 'rc') && '--rc-build' || '' }}
-          <%- endif %> artifacts/pkgs/repo/
+          tools pkg repo publish <{ gh_environment }> --salt-version=${{ needs.prepare-workflow.outputs.salt-version }} artifacts/pkgs/repo/

<%- endblock jobs %>
(workflow template, file name not shown)

@@ -178,9 +178,7 @@ permissions:
       - name: Publish Release Repository
        run: |
-          tools pkg repo publish release \
-            ${{ contains(needs.prepare-workflow.outputs.salt-version, 'rc') && '--rc-build' || '' }} \
-            --key-id=<{ gpg_key_id }> ${{ needs.prepare-workflow.outputs.salt-version }}
+          tools pkg repo publish <{ gh_environment }> ${{ needs.prepare-workflow.outputs.salt-version }}

<%- if includes.get("test-pkg-downloads", True) %>
<%- include "test-pkg-repo-downloads.yml.jinja" %>
(workflow template, file name not shown)

@@ -6,7 +6,7 @@
   <{ job_name }>:
     <%- do conclusion_needs.append(job_name) %>
     name: Trigger Branch Workflows
-    if: ${{ github.event_name == 'schedule' }}
+    if: ${{ github.event_name == 'schedule' && fromJSON(needs.workflow-requirements.outputs.requirements-met) }}
     runs-on:
       - ubuntu-latest
     needs:
(package test support module, file name not shown)

@@ -61,12 +61,12 @@ log = logging.getLogger(__name__)
 class SaltPkgInstall:
     conf_dir: pathlib.Path = attr.ib()
     system_service: bool = attr.ib(default=False)
-    proc: Subprocess = attr.ib(init=False)
+    proc: Subprocess = attr.ib(init=False, repr=False)
     pkgs: List[str] = attr.ib(factory=list)
     onedir: bool = attr.ib(default=False)
     singlebin: bool = attr.ib(default=False)
     compressed: bool = attr.ib(default=False)
-    hashes: Dict[str, Dict[str, Any]] = attr.ib()
+    hashes: Dict[str, Dict[str, Any]] = attr.ib(repr=False)
     root: pathlib.Path = attr.ib(default=None)
     run_root: pathlib.Path = attr.ib(default=None)
     ssm_bin: pathlib.Path = attr.ib(default=None)
@@ -91,7 +91,7 @@ class SaltPkgInstall:
     classic: bool = attr.ib(default=False)
     prev_version: str = attr.ib()
     pkg_version: str = attr.ib(default="1")
-    repo_data: str = attr.ib(init=False)
+    repo_data: str = attr.ib(init=False, repr=False)
     major: str = attr.ib(init=False)
     minor: str = attr.ib(init=False)
     relenv: bool = attr.ib(default=True)
@@ -554,22 +554,20 @@ class SaltPkgInstall:
         elif pkg.endswith("msi"):
             # Install the package
             log.debug("Installing: %s", str(pkg))
-            # START_MINION="" does not work as documented. The service is
-            # still starting. We need to fix this for RC2
-            ret = self.proc.run(
-                "msiexec.exe", "/qn", "/i", str(pkg), 'START_MINION=""'
-            )
+            # Write a batch file to run the installer. It is impossible to
+            # perform escaping of the START_MINION property that the MSI
+            # expects unless we do it via a batch file
+            batch_file = pathlib.Path(pkg).parent / "install_msi.cmd"
+            batch_content = f'msiexec /qn /i "{str(pkg)}" START_MINION=""\n'
+            with open(batch_file, "w") as fp:
+                fp.write(batch_content)
+            # Now run the batch file
+            ret = self.proc.run("cmd.exe", "/c", str(batch_file))
             self._check_retcode(ret)
         else:
             log.error("Invalid package: %s", pkg)
             return False

-        # Stop the service installed by the installer. We only need this
-        # until we fix the issue where the MSI installer is starting the
-        # salt-minion service when it shouldn't
-        log.debug("Removing installed salt-minion service")
-        self.proc.run(str(self.ssm_bin), "stop", "salt-minion")
-
         # Remove the service installed by the installer
         log.debug("Removing installed salt-minion service")
         self.proc.run(str(self.ssm_bin), "remove", "salt-minion", "confirm")
@@ -731,18 +729,20 @@ class SaltPkgInstall:
             with open(pkg_path, "wb") as fp:
                 fp.write(ret.content)
             if self.file_ext == "msi":
-                ret = self.proc.run(
-                    "msiexec.exe", "/qn", "/i", str(pkg_path), 'START_MINION=""'
-                )
+                # Write a batch file to run the installer. It is impossible to
+                # perform escaping of the START_MINION property that the MSI
+                # expects unless we do it via a batch file
+                batch_file = pkg_path.parent / "install_msi.cmd"
+                batch_content = f'msiexec /qn /i {str(pkg_path)} START_MINION=""'
+                with open(batch_file, "w") as fp:
+                    fp.write(batch_content)
+                # Now run the batch file
+                ret = self.proc.run("cmd.exe", "/c", str(batch_file))
                 self._check_retcode(ret)
             else:
                 ret = self.proc.run(pkg_path, "/start-minion=0", "/S")
                 self._check_retcode(ret)

-            # Stop the service installed by the installer
-            log.debug("Removing installed salt-minion service")
-            self.proc.run(str(self.ssm_bin), "stop", "salt-minion")
-
             log.debug("Removing installed salt-minion service")
             ret = self.proc.run(str(self.ssm_bin), "remove", "salt-minion", "confirm")
             self._check_retcode(ret)
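The batch-file indirection is the heart of both hunks above: routing msiexec through cmd.exe is the only reliable way to hand it a literally empty START_MINION property, since the quoting is otherwise consumed before msiexec sees it. A minimal standalone sketch of the same pattern, with subprocess standing in for the suite's self.proc.run helper (that substitution is an assumption for illustration):

```python
import pathlib
import subprocess


def install_msi_without_starting_minion(pkg: pathlib.Path) -> None:
    # Write a one-line batch file; the empty START_MINION="" property
    # survives cmd.exe but is mangled when msiexec is invoked directly.
    batch_file = pkg.parent / "install_msi.cmd"
    batch_file.write_text(f'msiexec /qn /i "{pkg}" START_MINION=""\n')
    # Run the batch file and fail loudly on a non-zero exit code.
    ret = subprocess.run(["cmd.exe", "/c", str(batch_file)], check=False)
    if ret.returncode != 0:
        raise RuntimeError(f"msiexec exited with {ret.returncode}")
```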
(package upgrade test, file name not shown)

@@ -13,7 +13,7 @@ def test_salt_upgrade(salt_call_cli, salt_minion, install_salt):
     assert ret.data

     # test pip install before an upgrade
-    dep = "PyGithub"
+    dep = "PyGithub==1.56.0"
     repo = "https://github.com/saltstack/salt.git"
     install = salt_call_cli.run("--local", "pip.install", dep)
     assert install.returncode == 0
(Windows MSI installer README, file name not shown)

@@ -4,66 +4,71 @@ The installer offers properties for unattended/silent installations.
 Example: install silently, set the master, don't start the service:

+In cmd:
 > msiexec /i *.msi MASTER=salt2 START_MINION=""

-Example: uninstall and remove configuration
+In powershell (you have to escape the quotes to disable starting the minion service):
+> msiexec /i *.msi MASTER=salt2 START_MINION=`"`"
+
+Example: uninstall and remove configuration
 > MsiExec.exe /X *.msi REMOVE_CONFIG=1

 ## Notes

 - The installer requires a privileged user
 - Properties must be upper case
-- Values of properties are case sensitve
-- Values must be quoted when they contain whitespace, or to unset a property, as in `START_MINION=""`
-- Creates a verbose log file, by default `%TEMP%\MSIxxxxx.LOG`, where xxxxx is random. The name of the log can be specified with `msiexec /log example.log`
 - extends the system `PATH` environment variable
+- Values of properties are case sensitive
+- Values must be quoted when they contain whitespace, or to unset a property, as in ``START_MINION=""``
+- In powershell, you must escape the quotes with a back tick for an empty string, ie: ``START_MINION=`"`"``
+- ``/l*v`` Creates a verbose log file, by default ``%TEMP%\MSIxxxxx.LOG``, where xxxxx is random. The name of the log can be specified with ``msiexec /l*v example.log``
+- ``/qn`` or ``/quiet`` installs quietly, suppressing all dialog boxes
+- ``/qb`` or ``/passive`` installs quietly but displays a simple progress bar

 ## Properties

-Property               | Default value           | Comment
----------------------- | ----------------------- | ------
-`MASTER`               | `salt`                  | The master (name or IP). Separate multiple masters by comma.
-`MASTER_KEY`           |                         | The master public key. See below.
-`MINION_ID`            | Hostname                | The minion id.
-`MINION_CONFIG`        |                         | Content to be written to the `minion` config file. See below.
-`START_MINION`         | `1`                     | Set to `""` to prevent the start of the `salt-minion` service.
-`MOVE_CONF`            |                         | Set to `1` to move configuration from `C:\salt` to `%ProgramData%`.
-`REMOVE_CONFIG`        |                         | Set to `1` to remove configuration on uninstall. Implied by `MINION_CONFIG`.
-`CLEAN_INSTALL`        |                         | Set to `1` to remove configuration and cache before install or upgrade.
-`CONFIG_TYPE`          | `Existing`              | Set to `Custom` or `Default` for scenarios below.
-`CUSTOM_CONFIG`        |                         | Name of a custom config file in the same path as the installer or full path. Requires `CONFIG_TYPE=Custom`. __ONLY FROM COMMANDLINE__
-`INSTALLDIR`           | Windows default         | Where to install binaries.
-`ROOTDIR`              | `C:\ProgramData\Salt Project\Salt` | Where to install configuration.
-`ARPSYSTEMCOMPONENT`   |                         | Set to `1` to hide "Salt Minion" in "Programs and Features".
+Property                 | Default value           | Comment
+------------------------ | ----------------------- | ------
+``MASTER``               | ``salt``                | The master (name or IP). Separate multiple masters by comma.
+``MASTER_KEY``           |                         | The master public key. See below.
+``MINION_ID``            | Hostname                | The minion id.
+``MINION_CONFIG``        |                         | Content to be written to the `minion` config file. See below.
+``START_MINION``         | ``1``                   | Set to ``""`` to prevent the start of the ``salt-minion`` service. In powershell you must escape each quotation mark with a back tick (`` `"`" ``)
+``MOVE_CONF``            |                         | Set to ``1`` to move configuration from ``C:\salt`` to ``%ProgramData%``.
+``REMOVE_CONFIG``        |                         | Set to ``1`` to remove configuration on uninstall. Implied by ``MINION_CONFIG``.
+``CLEAN_INSTALL``        |                         | Set to ``1`` to remove configuration and cache before install or upgrade.
+``CONFIG_TYPE``          | ``Existing``            | Set to ``Custom`` or ``Default`` for scenarios below.
+``CUSTOM_CONFIG``        |                         | Name of a custom config file in the same path as the installer or full path. Requires ``CONFIG_TYPE=Custom``. __ONLY FROM COMMANDLINE__
+``INSTALLDIR``           | Windows default         | Where to install binaries.
+``ROOTDIR``              | ``C:\ProgramData\Salt Project\Salt`` | Where to install configuration.
+``ARPSYSTEMCOMPONENT``   |                         | Set to ``1`` to hide "Salt Minion" in "Programs and Features".


-Master and id are read from file `conf\minion`
+Master and id are read from file ``conf\minion``

-You can set a master with `MASTER`.
+You can set a master with ``MASTER``.

-You can set a master public key with `MASTER_KEY`, after you converted it into one line like so:
+You can set a master public key with ``MASTER_KEY``, after you converted it into one line like so:

-- Remove the first and the last line (`-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----`).
+- Remove the first and the last line (``-----BEGIN PUBLIC KEY-----`` and ``-----END PUBLIC KEY-----``).
 - Remove linebreaks.

-### Property `MINION_CONFIG`
+### Property ``MINION_CONFIG``

-If `MINION_CONFIG` is set:
+If ``MINION_CONFIG`` is set:

-- Its content is written to configuraton file `conf\minion`, with `^` replaced by line breaks
+- Its content is written to configuration file ``conf\minion``, with ``^`` replaced by line breaks
 - All prior configuration is deleted:
-  - all `minion.d\*.conf` files
-  - the `minion_id` file
-- Implies `REMOVE_CONFIG=1`: uninstall will remove all configuration.
+  - all ``minion.d\*.conf`` files
+  - the ``minion_id`` file
+- Implies ``REMOVE_CONFIG=1``: uninstall will remove all configuration.

-Example `MINION_CONFIG="master: Anna^id: Bob"` results in:
+Example ``MINION_CONFIG="master: Anna^id: Bob"`` results in:

     master: Anna
     id: Bob


-### Property `CONFIG_TYPE`
+### Property ``CONFIG_TYPE``

 There are 3 scenarios the installer tries to account for:

@@ -75,30 +80,30 @@ Existing

 This setting makes no changes to the existing config and just upgrades/downgrades salt.
 Makes for easy upgrades. Just run the installer with a silent option.
-If there is no existing config, then the default is used and `master` and `minion id` are applied if passed.
+If there is no existing config, then the default is used and ``master`` and ``minion id`` are applied if passed.

 Custom

 This setting will lay down a custom config passed via the command line.
 Since we want to make sure the custom config is applied correctly, we'll need to back up any existing config.
-1. `minion` config renamed to `minion-<timestamp>.bak`
-2. `minion_id` file renamed to `minion_id-<timestamp>.bak`
-3. `minion.d` directory renamed to `minion.d-<timestamp>.bak`
-Then the custom config is laid down by the installer... and `master` and `minion id` should be applied to the custom config if passed.
+1. ``minion`` config renamed to ``minion-<timestamp>.bak``
+2. ``minion_id`` file renamed to ``minion_id-<timestamp>.bak``
+3. ``minion.d`` directory renamed to ``minion.d-<timestamp>.bak``
+Then the custom config is laid down by the installer... and ``master`` and ``minion id`` should be applied to the custom config if passed.

 Default

 This setting will reset config to be the default config contained in the pkg.
 Therefore, all existing config files should be backed up
-1. `minion` config renamed to `minion-<timestamp>.bak`
-2. `minion_id` file renamed to `minion_id-<timestamp>.bak`
-3. `minion.d` directory renamed to `minion.d-<timestamp>.bak`
-Then the default config file is laid down by the installer... settings for `master` and `minion id` should be applied to the default config if passed
+1. ``minion`` config renamed to ``minion-<timestamp>.bak``
+2. ``minion_id`` file renamed to ``minion_id-<timestamp>.bak``
+3. ``minion.d`` directory renamed to ``minion.d-<timestamp>.bak``
+Then the default config file is laid down by the installer... settings for ``master`` and ``minion id`` should be applied to the default config if passed


 ### Previous installation in C:\salt and how to install into C:\salt
-A previous installation or configuration in `C:\salt` causes an upgrade into `C:\salt`, unless you set `MOVE_CONF=1`.
-Set the two properties `INSTALLDIR=c:\salt ROOTDIR=c:\salt` to install binaries and configuration into `C:\salt`.
+A previous installation or configuration in ``C:\salt`` causes an upgrade into ``C:\salt``, unless you set ``MOVE_CONF=1``.
+Set the two properties ``INSTALLDIR=c:\salt ROOTDIR=c:\salt`` to install binaries and configuration into ``C:\salt``.

 ## Client requirements
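The ``^``-to-newline rule for ``MINION_CONFIG`` is easy to sanity-check; this short sketch (illustrative only, not installer code) mirrors what the installer effectively does with the property value:

```python
def render_minion_config(value: str) -> str:
    # MINION_CONFIG content is written to conf\minion with ^ turned into line breaks.
    return value.replace("^", "\n")


assert render_minion_config("master: Anna^id: Bob") == "master: Anna\nid: Bob"
```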
(binary file changed, contents not shown)

BIN pkg/windows/msi/tests/_mock_files/buildenv/Scripts/python310.dll (new file, contents not shown)
(MSI installer config test files, names not shown)

@@ -1,2 +1,2 @@
-properties CONFIG_TYPE=Default START_MINION="" MASTER=cli.master
+properties START_MINION="" CONFIG_TYPE=Default MASTER=cli.master
 dormant

@@ -1,2 +1,2 @@
-properties CONFIG_TYPE=Default START_MINION="" MINION_ID=cli.minion
+properties START_MINION="" CONFIG_TYPE=Default MINION_ID=cli.minion
 dormant

@@ -1,2 +1,2 @@
-properties CONFIG_TYPE=Default START_MINION="" MASTER=cli.master1,cli.master2
+properties START_MINION="" CONFIG_TYPE=Default MASTER=cli.master1,cli.master2
 dormant

@@ -1,2 +1,2 @@
-properties CONFIG_TYPE=Default START_MINION="" MASTER="cli.master1 cli.master2"
+properties START_MINION="" CONFIG_TYPE=Default MASTER="cli.master1 cli.master2"
 dormant

@@ -1 +1 @@
-properties CONFIG_TYPE=Custom CUSTOM_CONFIG=tests\config_tests\remove_config_custom_config.conf START_MINION="" REMOVE_CONFIG=1
+properties START_MINION="" CONFIG_TYPE=Custom CUSTOM_CONFIG=tests\config_tests\remove_config_custom_config.conf REMOVE_CONFIG=1

@@ -1 +1 @@
-properties CONFIG_TYPE=Default START_MINION="" REMOVE_CONFIG=1
+properties START_MINION="" CONFIG_TYPE=Default REMOVE_CONFIG=1
tools/pkgrepo.py (348 changes)
@@ -802,13 +802,21 @@ def src(
         "repo_path": {
             "help": "Local path for the repository that shall be published.",
         },
+        "salt_version": {
+            "help": "The salt version for which to build the repository",
+            "required": True,
+        },
     }
 )
-def nightly(ctx: Context, repo_path: pathlib.Path):
+def nightly(ctx: Context, repo_path: pathlib.Path, salt_version: str = None):
     """
     Publish to the nightly bucket.
     """
-    _publish_repo(ctx, repo_path=repo_path, nightly_build=True)
+    if TYPE_CHECKING:
+        assert salt_version is not None
+    _publish_repo(
+        ctx, repo_path=repo_path, nightly_build=True, salt_version=salt_version
+    )


 @publish.command(
@@ -816,16 +824,19 @@ def nightly(ctx: Context, repo_path: pathlib.Path):
         "repo_path": {
             "help": "Local path for the repository that shall be published.",
         },
-        "rc_build": {
-            "help": "Release Candidate repository target",
+        "salt_version": {
+            "help": "The salt version for which to build the repository",
+            "required": True,
         },
     }
 )
-def staging(ctx: Context, repo_path: pathlib.Path, rc_build: bool = False):
+def staging(ctx: Context, repo_path: pathlib.Path, salt_version: str = None):
     """
     Publish to the staging bucket.
     """
-    _publish_repo(ctx, repo_path=repo_path, rc_build=rc_build, stage=True)
+    if TYPE_CHECKING:
+        assert salt_version is not None
+    _publish_repo(ctx, repo_path=repo_path, stage=True, salt_version=salt_version)


 @repo.command(
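Both commands now share one shape: the parameter defaults to None so the plain Python signature stays callable on its own, the command metadata marks it required so the runner rejects a missing --salt-version, and the TYPE_CHECKING assert narrows the type for static analysis at zero runtime cost. A sketch of the pattern with argparse standing in for the repo's tools-command runner (that substitution is an assumption):

```python
import argparse
from typing import TYPE_CHECKING


def nightly(repo_path: str, salt_version: str = None) -> None:
    if TYPE_CHECKING:
        # The runner enforces --salt-version, so salt_version is never None
        # at runtime; this block exists purely for the type checker.
        assert salt_version is not None
    print(f"publishing {repo_path} for {salt_version}")


parser = argparse.ArgumentParser()
parser.add_argument("repo_path")
parser.add_argument("--salt-version", required=True)
args = parser.parse_args()
nightly(args.repo_path, args.salt_version)
```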
@@ -854,7 +865,9 @@ def backup_previous_releases(ctx: Context, salt_version: str = None):
         ctx.info(f"A backup prior to releasing {salt_version} has already been done.")
         ctx.exit(0)
     except ClientError as exc:
-        if "404" not in str(exc):
+        if "Error" not in exc.response:
             raise
+        if exc.response["Error"]["Code"] != "404":
+            raise

     files_in_backup: dict[str, datetime] = {}
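Matching on the structured error payload instead of substring-searching str(exc) is the idiomatic botocore check: the string form can false-positive on any key or message that happens to contain "404". A minimal sketch with a hypothetical existence probe:

```python
import boto3
from botocore.exceptions import ClientError


def object_exists(bucket: str, key: str) -> bool:
    s3 = boto3.client("s3")
    try:
        s3.head_object(Bucket=bucket, Key=key)
        return True
    except ClientError as exc:
        # The parsed response is the canonical place to inspect the failure.
        if exc.response.get("Error", {}).get("Code") == "404":
            return False
        raise
```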
@@ -872,7 +885,6 @@ def backup_previous_releases(ctx: Context, salt_version: str = None):
     ):
         files_to_backup.append((entry["Key"], entry["LastModified"]))

-    s3 = boto3.client("s3")
     with tools.utils.create_progress_bar() as progress:
         task = progress.add_task(
             "Back up previous releases", total=len(files_to_backup)
@@ -917,83 +929,106 @@
         "salt_version": {
             "help": "The salt version to release.",
         },
-        "rc_build": {
-            "help": "Release Candidate repository target",
-        },
-        "key_id": {
-            "help": "The GnuPG key ID used to sign.",
-            "required": True,
-        },
     }
 )
-def release(
-    ctx: Context, salt_version: str, key_id: str = None, rc_build: bool = False
-):
+def release(ctx: Context, salt_version: str):
     """
     Publish to the release bucket.
     """
-    if TYPE_CHECKING:
-        assert key_id is not None
-
-    if rc_build:
+    if "rc" in salt_version:
         bucket_folder = "salt_rc/salt/py3"
     else:
         bucket_folder = "salt/py3"

+    files_to_copy: list[str]
     files_to_delete: list[str] = []
     files_to_duplicate: list[tuple[str, str]] = []
+    directories_to_delete: list[str] = []

     ctx.info("Grabbing remote file listing of files to copy...")
+    s3 = boto3.client("s3")
+    repo_release_files_path = pathlib.Path(
+        f"release-artifacts/{salt_version}/.release-files.json"
+    )
+    repo_release_symlinks_path = pathlib.Path(
+        f"release-artifacts/{salt_version}/.release-symlinks.json"
+    )
+    with tempfile.TemporaryDirectory(prefix=f"{salt_version}_release_") as tsd:
+        local_release_files_path = pathlib.Path(tsd) / repo_release_files_path.name
+        try:
+            with local_release_files_path.open("wb") as wfh:
+                ctx.info(f"Downloading {repo_release_files_path} ...")
+                s3.download_fileobj(
+                    Bucket=tools.utils.STAGING_BUCKET_NAME,
+                    Key=str(repo_release_files_path),
+                    Fileobj=wfh,
+                )
+            files_to_copy = json.loads(local_release_files_path.read_text())
+        except ClientError as exc:
+            if "Error" not in exc.response:
+                log.exception(f"Error downloading {repo_release_files_path}: {exc}")
+                ctx.exit(1)
+            if exc.response["Error"]["Code"] == "404":
+                ctx.error(f"Could not find {repo_release_files_path} in bucket.")
+                ctx.exit(1)
+            if exc.response["Error"]["Code"] == "400":
+                ctx.error(
+                    f"Could not download {repo_release_files_path} from bucket: {exc}"
+                )
+                ctx.exit(1)
+            log.exception(f"Error downloading {repo_release_files_path}: {exc}")
+            ctx.exit(1)
+        local_release_symlinks_path = (
+            pathlib.Path(tsd) / repo_release_symlinks_path.name
+        )
+        try:
+            with local_release_symlinks_path.open("wb") as wfh:
+                ctx.info(f"Downloading {repo_release_symlinks_path} ...")
+                s3.download_fileobj(
+                    Bucket=tools.utils.STAGING_BUCKET_NAME,
+                    Key=str(repo_release_symlinks_path),
+                    Fileobj=wfh,
+                )
+            directories_to_delete = json.loads(local_release_symlinks_path.read_text())
+        except ClientError as exc:
+            if "Error" not in exc.response:
+                log.exception(f"Error downloading {repo_release_symlinks_path}: {exc}")
+                ctx.exit(1)
+            if exc.response["Error"]["Code"] == "404":
+                ctx.error(f"Could not find {repo_release_symlinks_path} in bucket.")
+                ctx.exit(1)
+            if exc.response["Error"]["Code"] == "400":
+                ctx.error(
+                    f"Could not download {repo_release_symlinks_path} from bucket: {exc}"
+                )
+                ctx.exit(1)
+            log.exception(f"Error downloading {repo_release_symlinks_path}: {exc}")
+            ctx.exit(1)

-    glob_match = f"{bucket_folder}/**/latest.repo"
-    files_to_copy = _get_repo_file_list(
-        bucket_name=tools.utils.STAGING_BUCKET_NAME,
-        bucket_folder=bucket_folder,
-        glob_match=glob_match,
-    )
-    glob_match = f"{bucket_folder}/**/minor/{salt_version}.repo"
-    files_to_copy.extend(
-        _get_repo_file_list(
-            bucket_name=tools.utils.STAGING_BUCKET_NAME,
-            bucket_folder=bucket_folder,
-            glob_match=glob_match,
-        )
-    )
-    if rc_build:
-        glob_match = "{bucket_folder}/**/{}~rc{}*".format(
-            *salt_version.split("rc"), bucket_folder=bucket_folder
-        )
-        files_to_copy.extend(
-            _get_repo_file_list(
-                bucket_name=tools.utils.STAGING_BUCKET_NAME,
-                bucket_folder=bucket_folder,
-                glob_match=glob_match,
-            )
-        )
-    glob_match = f"{bucket_folder}/**/minor/{salt_version}/**"
-    files_to_copy.extend(
-        _get_repo_file_list(
-            bucket_name=tools.utils.STAGING_BUCKET_NAME,
-            bucket_folder=bucket_folder,
-            glob_match=glob_match,
-        )
-    )
-    glob_match = f"{bucket_folder}/**/src/{salt_version}/**"
-    files_to_copy.extend(
-        _get_repo_file_list(
-            bucket_name=tools.utils.STAGING_BUCKET_NAME,
-            bucket_folder=bucket_folder,
-            glob_match=glob_match,
-        )
-    )

     if not files_to_copy:
         ctx.error(f"Could not find any files related to the '{salt_version}' release.")
         ctx.exit(1)
+    if directories_to_delete:
+        with tools.utils.create_progress_bar() as progress:
+            task = progress.add_task(
+                "Deleting directories to override.",
+                total=len(directories_to_delete),
+            )
+            for directory in directories_to_delete:
+                try:
+                    objects_to_delete: list[dict[str, str]] = []
+                    for path in _get_repo_file_list(
+                        bucket_name=tools.utils.RELEASE_BUCKET_NAME,
+                        bucket_folder=bucket_folder,
+                        glob_match=f"{directory}/**",
+                    ):
+                        objects_to_delete.append({"Key": path})
+                    if objects_to_delete:
+                        s3.delete_objects(
+                            Bucket=tools.utils.RELEASE_BUCKET_NAME,
+                            Delete={"Objects": objects_to_delete},
+                        )
+                except ClientError:
+                    log.exception("Failed to delete remote files")
+                finally:
+                    progress.update(task, advance=1)

     already_copied_files: list[str] = []
+    onedir_listing: dict[str, list[str]] = {}
     s3 = boto3.client("s3")
     with tools.utils.create_progress_bar() as progress:
         task = progress.add_task(
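The net effect of the rewrite above: staging records exactly which objects it uploaded, and the release step replays that manifest instead of re-globbing the staging bucket (which is what the removed rc_build glob blocks did). A compact sketch of the manifest download, with a placeholder bucket name:

```python
import json
import pathlib
import tempfile

import boto3


def load_release_manifest(salt_version: str, bucket: str = "example-staging-bucket") -> list[str]:
    key = f"release-artifacts/{salt_version}/.release-files.json"
    s3 = boto3.client("s3")
    with tempfile.TemporaryDirectory() as tmp:
        local = pathlib.Path(tmp) / "release-files.json"
        # Download the JSON listing that the staging pipeline wrote
        # alongside the artifacts, then parse it into a list of keys.
        with local.open("wb") as wfh:
            s3.download_fileobj(Bucket=bucket, Key=key, Fileobj=wfh)
        return json.loads(local.read_text())
```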
@@ -1002,22 +1037,6 @@ def release(
         for fpath in files_to_copy:
             if fpath in already_copied_files:
                 continue
-            if fpath.startswith(f"{bucket_folder}/windows/"):
-                if "windows" not in onedir_listing:
-                    onedir_listing["windows"] = []
-                onedir_listing["windows"].append(fpath)
-            elif fpath.startswith(f"{bucket_folder}/macos/"):
-                if "macos" not in onedir_listing:
-                    onedir_listing["macos"] = []
-                onedir_listing["macos"].append(fpath)
-            elif fpath.startswith(f"{bucket_folder}/onedir/"):
-                if "onedir" not in onedir_listing:
-                    onedir_listing["onedir"] = []
-                onedir_listing["onedir"].append(fpath)
-            else:
-                if "package" not in onedir_listing:
-                    onedir_listing["package"] = []
-                onedir_listing["package"].append(fpath)
             ctx.info(f" * Copying {fpath}")
             try:
                 s3.copy_object(
@@ -1038,8 +1057,6 @@ def release(
             progress.update(task, advance=1)

     # Now let's get the onedir based repositories where we need to update several repo.json
-    update_latest = False
-    update_minor = False
     major_version = packaging.version.parse(salt_version).major
     with tempfile.TemporaryDirectory(prefix=f"{salt_version}_release_") as tsd:
         repo_path = pathlib.Path(tsd)
@@ -1049,7 +1066,6 @@ def release(
             repo_path,
             salt_version,
             distro,
-            rc_build=rc_build,
         )
         repo_json_path = create_repo_path.parent.parent / "repo.json"
@@ -1105,39 +1121,10 @@ def release(
             release_minor_repo_json[salt_version] = release_json

             if latest_version <= salt_version:
-                update_latest = True
                 release_repo_json["latest"] = release_json
-                glob_match = f"{bucket_folder}/{distro}/**/latest/**"
-                files_to_delete.extend(
-                    _get_repo_file_list(
-                        bucket_name=tools.utils.RELEASE_BUCKET_NAME,
-                        bucket_folder=bucket_folder,
-                        glob_match=glob_match,
-                    )
-                )
-                for fpath in onedir_listing[distro]:
-                    files_to_duplicate.append(
-                        (fpath, fpath.replace(f"minor/{salt_version}", "latest"))
-                    )

             if latest_minor_version <= salt_version:
-                update_minor = True
                 release_minor_repo_json["latest"] = release_json
-                glob_match = f"{bucket_folder}/{distro}/**/{major_version}/**"
-                files_to_delete.extend(
-                    _get_repo_file_list(
-                        bucket_name=tools.utils.RELEASE_BUCKET_NAME,
-                        bucket_folder=bucket_folder,
-                        glob_match=glob_match,
-                    )
-                )
-                for fpath in onedir_listing[distro]:
-                    files_to_duplicate.append(
-                        (
-                            fpath,
-                            fpath.replace(f"minor/{salt_version}", str(major_version)),
-                        )
-                    )

             ctx.info(f"Writing {minor_repo_json_path} ...")
             minor_repo_json_path.write_text(
@@ -1146,86 +1133,6 @@ def release(
     ctx.info(f"Writing {repo_json_path} ...")
     repo_json_path.write_text(json.dumps(release_repo_json, sort_keys=True))

-    # Now lets handle latest and minor updates for non one dir based repositories
-    onedir_based_paths = (
-        f"{bucket_folder}/windows/",
-        f"{bucket_folder}/macos/",
-        f"{bucket_folder}/onedir/",
-    )
-    if update_latest:
-        glob_match = f"{bucket_folder}/**/latest/**"
-        for fpath in _get_repo_file_list(
-            bucket_name=tools.utils.RELEASE_BUCKET_NAME,
-            bucket_folder=bucket_folder,
-            glob_match=glob_match,
-        ):
-            if fpath.startswith(onedir_based_paths):
-                continue
-            files_to_delete.append(fpath)
-
-        for fpath in onedir_listing["package"]:
-            files_to_duplicate.append(
-                (fpath, fpath.replace(f"minor/{salt_version}", "latest"))
-            )
-
-    if update_minor:
-        glob_match = f"{bucket_folder}/**/{major_version}/**"
-        for fpath in _get_repo_file_list(
-            bucket_name=tools.utils.RELEASE_BUCKET_NAME,
-            bucket_folder=bucket_folder,
-            glob_match=glob_match,
-        ):
-            if fpath.startswith(onedir_based_paths):
-                continue
-            files_to_delete.append(fpath)
-
-        for fpath in onedir_listing["package"]:
-            files_to_duplicate.append(
-                (fpath, fpath.replace(f"minor/{salt_version}", str(major_version)))
-            )
-
-    if files_to_delete:
-        with tools.utils.create_progress_bar() as progress:
-            task = progress.add_task(
-                "Deleting directories to override.", total=len(files_to_delete)
-            )
-            try:
-                s3.delete_objects(
-                    Bucket=tools.utils.RELEASE_BUCKET_NAME,
-                    Delete={
-                        "Objects": [
-                            {"Key": path for path in files_to_delete},
-                        ]
-                    },
-                )
-            except ClientError:
-                log.exception("Failed to delete remote files")
-            finally:
-                progress.update(task, advance=1)
-
-    with tools.utils.create_progress_bar() as progress:
-        task = progress.add_task(
-            "Copying files between buckets", total=len(files_to_duplicate)
-        )
-        for src, dst in files_to_duplicate:
-            ctx.info(f" * Copying {src}\n -> {dst}")
-            try:
-                s3.copy_object(
-                    Bucket=tools.utils.RELEASE_BUCKET_NAME,
-                    Key=dst,
-                    CopySource={
-                        "Bucket": tools.utils.STAGING_BUCKET_NAME,
-                        "Key": src,
-                    },
-                    MetadataDirective="COPY",
-                    TaggingDirective="COPY",
-                    ServerSideEncryption="AES256",
-                )
-            except ClientError:
-                log.exception(f"Failed to copy {fpath}")
-            finally:
-                progress.update(task, advance=1)
-
     for dirpath, dirnames, filenames in os.walk(repo_path, followlinks=True):
         for path in filenames:
             upload_path = pathlib.Path(dirpath, path)
@@ -1247,9 +1154,6 @@ def release(
         "salt_version": {
             "help": "The salt version to release.",
         },
-        "rc_build": {
-            "help": "Release Candidate repository target",
-        },
         "key_id": {
             "help": "The GnuPG key ID used to sign.",
             "required": True,
@@ -1266,7 +1170,6 @@ def github(
     ctx: Context,
     salt_version: str,
     key_id: str = None,
-    rc_build: bool = False,
     repository: str = "saltstack/salt",
 ):
     """
@@ -1402,7 +1305,12 @@ def _get_salt_releases(ctx: Context, repository: str) -> list[Version]:
     """
     versions = set()
     with ctx.web as web:
-        web.headers.update({"Accept": "application/vnd.github+json"})
+        headers = {
+            "Accept": "application/vnd.github+json",
+        }
+        if "GITHUB_TOKEN" in os.environ:
+            headers["Authorization"] = f"Bearer {os.environ['GITHUB_TOKEN']}"
+        web.headers.update(headers)
         ret = web.get(f"https://api.github.com/repos/{repository}/tags")
         if ret.status_code != 200:
             ctx.error(
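The token is strictly opt-in, so anonymous runs keep working while CI can raise its rate limit. The same pattern with plain requests (the repo itself uses its ctx.web session wrapper, so requests here is an illustrative stand-in):

```python
import os

import requests


def list_tags(repository: str) -> list[dict]:
    headers = {"Accept": "application/vnd.github+json"}
    # Unauthenticated calls hit GitHub's low anonymous rate limit; send a
    # bearer token when the environment provides one.
    if "GITHUB_TOKEN" in os.environ:
        headers["Authorization"] = f"Bearer {os.environ['GITHUB_TOKEN']}"
    resp = requests.get(
        f"https://api.github.com/repos/{repository}/tags",
        headers=headers,
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
```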
@@ -1743,8 +1651,8 @@ def _get_file_checksum(fpath: pathlib.Path, hash_name: str) -> str:
 def _publish_repo(
     ctx: Context,
     repo_path: pathlib.Path,
+    salt_version: str,
     nightly_build: bool = False,
-    rc_build: bool = False,
     stage: bool = False,
 ):
     """
@@ -1761,6 +1669,8 @@ def _publish_repo(
     s3 = boto3.client("s3")
     to_delete_paths: dict[pathlib.Path, list[dict[str, str]]] = {}
     to_upload_paths: list[pathlib.Path] = []
+    symlink_paths: list[str] = []
+    uploaded_files: list[str] = []
     for dirpath, dirnames, filenames in os.walk(repo_path, followlinks=True):
         for dirname in dirnames:
             path = pathlib.Path(dirpath, dirname)
@@ -1782,6 +1692,7 @@ def _publish_repo(
                 for entry in ret["Contents"]:
                     objects.append({"Key": entry["Key"]})
                 to_delete_paths[path] = objects
+                symlink_paths.append(str(relpath))
             except ClientError as exc:
                 if "Error" not in exc.response:
                     raise
@@ -1823,7 +1734,36 @@ def _publish_repo(
                     bucket_name,
                     str(relpath),
                     Callback=tools.utils.UpdateProgress(progress, task),
+                    ExtraArgs={
+                        "Metadata": {
+                            "x-amz-meta-salt-release-version": salt_version,
+                        }
+                    },
                 )
+                uploaded_files.append(str(relpath))
+            if stage is True:
+                repo_files_path = f"release-artifacts/{salt_version}/.release-files.json"
+                ctx.info(f"Uploading {repo_files_path} ...")
+                s3.put_object(
+                    Key=repo_files_path,
+                    Bucket=bucket_name,
+                    Body=json.dumps(uploaded_files).encode(),
+                    Metadata={
+                        "x-amz-meta-salt-release-version": salt_version,
+                    },
+                )
+                repo_symlinks_path = (
+                    f"release-artifacts/{salt_version}/.release-symlinks.json"
+                )
+                ctx.info(f"Uploading {repo_symlinks_path} ...")
+                s3.put_object(
+                    Key=repo_symlinks_path,
+                    Bucket=bucket_name,
+                    Body=json.dumps(symlink_paths).encode(),
+                    Metadata={
+                        "x-amz-meta-salt-release-version": salt_version,
+                    },
+                )
     except KeyboardInterrupt:
         pass
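upload_file takes the metadata through ExtraArgs, which is how every published object gets stamped with the release it belongs to. A small sketch (bucket, key, and the metadata key name are placeholders; the diff above shows the repo's exact spelling):

```python
import boto3


def upload_with_version(local_path: str, bucket: str, key: str, salt_version: str) -> None:
    s3 = boto3.client("s3")
    s3.upload_file(
        local_path,
        bucket,
        key,
        # User metadata is stored with the object and served back as
        # x-amz-meta-* response headers on GET/HEAD.
        ExtraArgs={"Metadata": {"salt-release-version": salt_version}},
    )
```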
(release tools script, file name not shown)

@@ -63,6 +63,8 @@ def upload_artifacts(ctx: Context, salt_version: str, artifacts_path: pathlib.Pa
         if "Contents" in ret:
             objects = []
             for entry in ret["Contents"]:
+                if entry["Key"].endswith(".release-backup-done"):
+                    continue
                 objects.append({"Key": entry["Key"]})
             to_delete_paths.extend(objects)
     except ClientError as exc:
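The same guard in isolation: when collecting keys to delete from a ListObjectsV2 page, the .release-backup-done sentinel is skipped so a completed backup is never clobbered. Illustrative only:

```python
def keys_to_delete(page: dict) -> list[dict[str, str]]:
    objects = []
    for entry in page.get("Contents", []):
        if entry["Key"].endswith(".release-backup-done"):
            continue  # keep the marker so re-runs stay idempotent
        objects.append({"Key": entry["Key"]})
    return objects
```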