diff --git a/.zuul.d/config.yaml b/.zuul.d/config.yaml index 61b378166..902123cb4 100644 --- a/.zuul.d/config.yaml +++ b/.zuul.d/config.yaml @@ -18,6 +18,7 @@ - job: name: scs-check-adr-syntax parent: base + nodeset: pod-fedora-40 pre-run: playbooks/pre.yaml run: playbooks/adr_syntax.yaml - job: @@ -26,6 +27,7 @@ secrets: - name: clouds_conf secret: SECRET_STANDARDS + nodeset: pod-fedora-40 vars: preset: default pre-run: diff --git a/Standards/scs-0001-v1-sovereign-cloud-standards.md b/Standards/scs-0001-v1-sovereign-cloud-standards.md index 48ef64c49..eabfff020 100644 --- a/Standards/scs-0001-v1-sovereign-cloud-standards.md +++ b/Standards/scs-0001-v1-sovereign-cloud-standards.md @@ -107,7 +107,7 @@ embedded in the markdown header. | Field name | Requirement | Description | | --------------- | -------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | | `type` | REQUIRED | one of `Procedural`, `Standard`, `Decision Record`, or `Supplement` | -| `status` | REQUIRED | one of `Proposal`, `Draft`, `Stable`, `Deprecated`, or `Rejected` | +| `status` | REQUIRED | one of `Draft`, `Stable`, `Deprecated`, or `Rejected` | | `track` | REQUIRED | one of `Global`, `IaaS`, `KaaS`, `IAM`, `Ops` | | `supplements` | REQUIRED precisely when `type` is `Supplement` | list of documents that are extended by this document (e.g., multiple major versions) | | `deprecated_at` | REQUIRED if `status` is `Deprecated` | ISO formatted date indicating the date after which the deprecation is in effect | @@ -167,11 +167,11 @@ In addition, the following OPTIONAL sections should be considered: ## Process The lifecycle of an SCS document goes through the following phases: -Proposal, Draft, Stable, Deprecated, and Rejected. +Draft, Stable, Deprecated, and Rejected. 
```mermaid graph TD - A[Proposal] -->|Pull Request| B[Draft] + A["Draft (Proposal)"] -->|Pull Request| B[Draft] B -->|Pull Request| D[Stable] B -->|Pull Request| E[Rejected] D -->|Pull Request| F[Deprecated] @@ -195,8 +195,15 @@ Supplements may be kept in Draft state, because they are not authoritative. To propose a new SCS document, a community participant creates a pull request on GitHub against the [standards repository in the SovereignCloudStack organisation][scs-standards-repo]. - -The pull request MUST add exactly one SCS document, +In the beginning, the pull request will contain a draft of an SCS document and +the community participant should present it to the SCS community. +They may refer to the [SCS Community page](https://docs.scs.community/community/) +for an overview of applicable means of communication and online meetings +to get in touch with the SCS community. +Community participants are encouraged to present their proposal to the SCS community early on. +Note that the proposal draft's content does not need to be finished in any way at this stage. + +The pull request for the proposal MUST add exactly one SCS document, in the `Standards` folder. In the proposal phase, the document number MUST be replaced with `xxxx` in the file name, @@ -209,7 +216,7 @@ for a Supplement of `scs-0100-v3-flavor-naming.md`, the file name might be `scs-0100-w1-flavor-naming-implementation-testing.md` (note the `w1`!). The metadata MUST indicate the intended `track` and `type` of the document, -and the `status` MUST be set to `Proposal`; +and the `status` MUST be set to `Draft`; for a Supplement, the `supplements` field MUST be set to a list of documents (usually containing one element). @@ -217,7 +224,8 @@ Upon acceptance by the group of people identified by the `track`, a number is assigned (the next unused number) and the proposer is asked -to rename the file to replace the `xxxx` with that number. 
+to rename the file to replace the `xxxx` with that number +before the merge of the pull request. **Note:** Documents on the `Design Record` track MAY be proposed or accepted directly into `Stable` state, diff --git a/Standards/scs-0117-v1-volume-backup-service.md b/Standards/scs-0117-v1-volume-backup-service.md new file mode 100644 index 000000000..d272dfa05 --- /dev/null +++ b/Standards/scs-0117-v1-volume-backup-service.md @@ -0,0 +1,97 @@ +--- +title: Volume Backup Functionality +type: Standard +status: Draft +track: IaaS +--- + +## Introduction + +OpenStack offers a variety of resources where users are able to transfer and store data in the infrastructure. +A prime example of these resources are volumes which are attached to virtual machines as virtual block storage devices. +As such they carry potentially large amounts of user data which is constantly changing at runtime. +It is important for users to have the ability to create backups of this data in a reliable and efficient manner. + +## Terminology + +| Term | Meaning | +|---|---| +| CSP | Cloud Service Provider, provider managing the OpenStack infrastructure | +| IaaS | Abbreviation for Infrastructure as a Service | +| Image | IaaS resource representing a snapshot of a block storage disk, can be used to create Volumes | +| Volume | IaaS resource representing a virtual block storage device that can be attached as a disk to virtual machines | +
+## Motivation + +The [volume backup functionality of the Block Storage API](https://docs.openstack.org/cinder/latest/admin/volume-backups.html) is a feature that is not available in all clouds per default, e.g., in OpenStack. +The feature requires a backend to be prepared and configured correctly before it can be used. +In the Block Storage service, the backup storage backend is usually configured separately from the storage backend of the general volume service and may not be mandatory. 
+Thus, an arbitrary cloud may or may not offer the backup feature in the Block Storage API. + +This standard aims to make this functionality the default in SCS clouds so that customers can expect the feature to be usable. + +## Design Considerations + +The standard should make sure that the feature is available and usable but should not limit the exact implementation (e.g. choice of backend driver) any further than necessary. + +### Options considered + +#### Only recommend volume backup feature, use images as alternative + +As an alternative to the volume backup feature of the Block Storage API, images can also be created based on volumes and act as a backup under certain circumstances. +As an option, this standard could keep the actual integration of the volume backup feature optional and guide users how to use images as backup targets instead in case the feature is unavailable. + +However, it is not guaranteed that the image backend storage is separate from the volume storage. +For instance, both could be using the same Ceph cluster. +In such a case, the images would not count as genuine backups. + +Although users are able to download images and transfer them to a different storage location, this approach might also prove unfeasible depending on the image size and the existence (or lack) of appropriate target storage on the user side. + +Furthermore, incremental backups are not possible when creating images from volumes either. +This results in time-consuming backup operations of fully copying a volume every time a backup is created. + +#### Focus on feature availability, make feature mandatory + +This option is pretty straightforward. +It would make the volume backup feature mandatory for SCS clouds. +This way users can expect the feature to be available and usable. + +With this, users can leverage functionalities like incremental backups and benefit from optimized performance of the backup process due to the tight integration with the volume service. 
+ +However, it does not seem feasible to also mandate having a separate storage backend for volume backups at the same time due to potential infrastructure limitations at CSP-side making it hard or even impossible to offer. +As such, the actual benefit of backups in terms of reliability and security aspects would be questionable if a separate storage backend is not mandated and therefore not guaranteed. + +This approach would focus on feature availability rather than backup reliability. + +#### Focus on backup reliability, make separate backend mandatory + +As an alternative, the volume backup feature availability could be made optional but in case a CSP chooses to offer it, the standard would mandate a separate storage backend to be used for volume backups. +This way, failures of the volume storage backend would not directly impact the availability and safety of volume backups, making them actually live up to their name. + +In contrast to the above, this approach would focus on backup reliability rather than feature availability. + +## Standard + +This standard decides to go with the second option and makes the volume backup feature mandatory in the following way: + +In an SCS cloud, the volume backup functionality MUST be configured properly and its API as defined per `/v3/{project_id}/backups` MUST be offered to customers. +If using Cinder, a suitable [backup driver](https://docs.openstack.org/cinder/latest/configuration/block-storage/backup-drivers.html) MUST be set up. + +The volume backup target storage SHOULD be a separate storage system from the one used for volumes themselves. 
+ +## Related Documents + +- [OpenStack Block Storage v3 Backup API reference](https://docs.openstack.org/api-ref/block-storage/v3/index.html#backups-backups) +- [OpenStack Volume Backup Drivers](https://docs.openstack.org/cinder/latest/configuration/block-storage/backup-drivers.html) + +## Conformance Tests + +Conformance tests include using the `/v3/{project_id}/backups` Block Storage API endpoint to create a volume and a backup of it as a non-admin user and subsequently restore the backup on a new volume while verifying the success of each operation. +These tests verify the mandatory part of the standard: providing the Volume Backup API. + +There is a test suite in [`volume-backup-tester.py`](https://github.com/SovereignCloudStack/standards/blob/main/Tests/iaas/volume-backup/volume-backup-tester.py). +The test suite connects to the OpenStack API and executes basic operations using the volume backup API to verify that the functionality requested by the standard is available. +Please consult the associated [README.md](https://github.com/SovereignCloudStack/standards/blob/main/Tests/iaas/volume-backup/README.md) for detailed setup and testing instructions. + +Note that these tests don't verify the optional part of the standard: providing a separate storage backend for Cinder volume backups. +This cannot be checked from outside of the infrastructure as it is an architectural property of the infrastructure itself and transparent to customers. diff --git a/Tests/iaas/volume-backup/README.md b/Tests/iaas/volume-backup/README.md new file mode 100644 index 000000000..2b6cd4716 --- /dev/null +++ b/Tests/iaas/volume-backup/README.md @@ -0,0 +1,70 @@ +# Volume Backup API Test Suite + +## Test Environment Setup + +### Test Execution Environment + +> **NOTE:** The test execution procedure does not require cloud admin rights. + +To execute the test suite a valid cloud configuration for the OpenStack SDK in the shape of "`clouds.yaml`" is mandatory[^1]. 
+**The file is expected to be located in the current working directory where the test script is executed unless configured otherwise.** + +[^1]: [OpenStack Documentation: Configuring OpenStack SDK Applications](https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html) + +The test execution environment can be located on any system outside of the cloud infrastructure that has OpenStack API access. +Make sure that the API access is configured properly in "`clouds.yaml`". + +It is recommended to use a Python virtual environment[^2]. +Next, install the OpenStack SDK required by the test suite: + +```bash +pip3 install openstacksdk +``` + +Within this environment execute the test suite. + +[^2]: [Python 3 Documentation: Virtual Environments and Packages](https://docs.python.org/3/tutorial/venv.html) + +## Test Execution + +The test suite is executed as follows: + +```bash +python3 volume-backup-tester.py --os-cloud mycloud +``` + +As an alternative to "`--os-cloud`", the "`OS_CLOUD`" environment variable may be specified instead. +The parameter is used to look up the correct cloud configuration in "`clouds.yaml`". +For the example command above, this file should contain a `clouds.mycloud` section like this: + +```yaml +--- +clouds: + mycloud: + auth: + auth_url: ... + ... + ... +``` + +If the test suite fails and leaves test resources behind, the "`--cleanup-only`" flag may be used to delete those resources from the domains: + +```bash +python3 volume-backup-tester.py --os-cloud mycloud --cleanup-only +``` + +For any further options consult the output of "`python3 volume-backup-tester.py --help`". + +### Script Behavior & Test Results + +> **NOTE:** Before any execution of test batches, the script will automatically perform a cleanup of volumes and volume backups matching a special prefix (see the "`--prefix`" flag). +> This cleanup behavior is identical to "`--cleanup-only`". + +The script will print all cleanup actions and passed tests to `stdout`. 
If all tests pass, the script will return with an exit code of `0`. + +If any test fails, the script will halt, print the exact error to `stderr` and return with a non-zero exit code. + +In case of a failed test, cleanup is not performed automatically, allowing for manual inspection of the cloud state for debugging purposes. +Although unnecessary due to automatic cleanup upon next execution, you can manually trigger a cleanup using the "`--cleanup-only`" flag of this script. diff --git a/Tests/iaas/volume-backup/volume-backup-tester.py b/Tests/iaas/volume-backup/volume-backup-tester.py new file mode 100644 index 000000000..f4fa9522d --- /dev/null +++ b/Tests/iaas/volume-backup/volume-backup-tester.py @@ -0,0 +1,282 @@ +"""Volume Backup API tester for Block Storage API + +This test script executes basic operations on the Block Storage API centered +around volume backups. Its purpose is to verify that the Volume Backup API is +available and working as expected using simple operations such as creating and +restoring volume backups. + +It verifies that a properly configured backup driver is present to the extent +that aforementioned operations succeed on the API level. It does not by any +means verify that the backup and restore procedures actually handle the data +correctly (it only uses empty volumes and does not look at data for the sake +of simplicity). +""" + +import argparse +import getpass +import os +import time +import typing + +import openstack + +# prefix to be included in the names of any Keystone resources created +# used by the cleanup routine to identify resources that can be safely deleted +DEFAULT_PREFIX = "scs-test-" + +# timeout in seconds for resource availability checks +# (e.g. 
a volume becoming available) WAIT_TIMEOUT = 60 + + +def connect(cloud_name: str, password: typing.Optional[str] = None + ) -> openstack.connection.Connection: + """Create a connection to an OpenStack cloud + + :param string cloud_name: + The name of the configuration to load from clouds.yaml. + + :param string password: + Optional password override for the connection. + + :returns: openstack.connection.Connection + """ + + if password: + return openstack.connect( + cloud=cloud_name, + password=password + ) + else: + return openstack.connect( + cloud=cloud_name, + ) + + +def test_backup(conn: openstack.connection.Connection, + prefix=DEFAULT_PREFIX, timeout=WAIT_TIMEOUT) -> None: + """Execute volume backup tests on the connection + + This will create an empty volume, a backup of that empty volume and then + attempt to restore the backup onto a new volume. + Purpose of these tests is to verify that the volume backup API is working + correctly. + """ + + # CREATE VOLUME + print("Creating volume ...") + volume = conn.block_storage.create_volume( + name=f"{prefix}volume", + size=1 + ) + assert volume is not None, ( + "Initial volume creation failed" + ) + volume_id = volume.id + assert conn.block_storage.get_volume(volume_id) is not None, ( + "Retrieving initial volume by ID failed" + ) + + print( + f"↳ waiting for volume with ID '{volume_id}' to reach status " + f"'available' ..." 
+ ) + seconds_waited = 0 + while conn.block_storage.get_volume(volume_id).status != "available": + time.sleep(1.0) + seconds_waited += 1 + assert seconds_waited < timeout, ( + f"Timeout reached while waiting for volume to reach status " + f"'available' (volume id: {volume_id}) after {seconds_waited} " + f"seconds" + ) + print("Create empty volume: PASS") + + # CREATE BACKUP + print("Creating backup from volume ...") + backup = conn.block_storage.create_backup( + name=f"{prefix}volume-backup", + volume_id=volume_id + ) + assert backup is not None, ( + "Backup creation failed" + ) + backup_id = backup.id + assert conn.block_storage.get_backup(backup_id) is not None, ( + "Retrieving backup by ID failed" + ) + + print(f"↳ waiting for backup '{backup_id}' to become available ...") + seconds_waited = 0 + while conn.block_storage.get_backup(backup_id).status != "available": + time.sleep(1.0) + seconds_waited += 1 + assert seconds_waited < timeout, ( + f"Timeout reached while waiting for backup to reach status " + f"'available' (backup id: {backup_id}) after {seconds_waited} " + f"seconds" + ) + print("Create backup from volume: PASS") + + # RESTORE BACKUP + print("Restoring backup to volume ...") + restored_volume_name = f"{prefix}restored-backup" + conn.block_storage.restore_backup( + backup_id, + name=restored_volume_name + ) + + print( + f"↳ waiting for restoration target volume '{restored_volume_name}' " + f"to be created ..." + ) + seconds_waited = 0 + while conn.block_storage.find_volume(restored_volume_name) is None: + time.sleep(1.0) + seconds_waited += 1 + assert seconds_waited < timeout, ( + f"Timeout reached while waiting for restored volume to be created " + f"(volume name: {restored_volume_name}) after {seconds_waited} " + f"seconds" + ) + # wait for the volume restoration to finish + print( + f"↳ waiting for restoration target volume '{restored_volume_name}' " + f"to reach 'available' status ..." 
+ ) + volume_id = conn.block_storage.find_volume(restored_volume_name).id + while conn.block_storage.get_volume(volume_id).status != "available": + time.sleep(1.0) + seconds_waited += 1 + assert seconds_waited < timeout, ( + f"Timeout reached while waiting for restored volume reach status " + f"'available' (volume id: {volume_id}) after {seconds_waited} " + f"seconds" + ) + print("Restore volume from backup: PASS") + + +def cleanup(conn: openstack.connection.Connection, prefix=DEFAULT_PREFIX, + timeout=WAIT_TIMEOUT): + """ + Looks up volume and volume backup resources matching the given prefix and + deletes them. + """ + + def wait_for_resource(resource_type: str, resource_id: str, + expected_status="available") -> None: + seconds_waited = 0 + get_func = getattr(conn.block_storage, f"get_{resource_type}") + while get_func(resource_id).status != expected_status: + time.sleep(1.0) + seconds_waited += 1 + assert seconds_waited < timeout, ( + f"Timeout reached while waiting for {resource_type} during " + f"cleanup to be in status '{expected_status}' " + f"({resource_type} id: {resource_id}) after {seconds_waited} " + f"seconds" + ) + + print(f"\nPerforming cleanup for resources with the " + f"'{prefix}' prefix ...") + + backups = conn.block_storage.backups() + for backup in backups: + if backup.name.startswith(prefix): + try: + wait_for_resource("backup", backup.id) + except openstack.exceptions.ResourceNotFound: + # if the resource has vanished on + # its own in the meantime ignore it + continue + print(f"↳ deleting volume backup '{backup.id}' ...") + conn.block_storage.delete_backup(backup.id) + + # wait for all backups to be cleaned up before attempting to remove volumes + seconds_waited = 0 + while len( + # list of all backups whose name starts with the prefix + [b for b in conn.block_storage.backups() if b.name.startswith(prefix)] + ) > 0: + time.sleep(1.0) + seconds_waited += 1 + assert seconds_waited < timeout, ( + f"Timeout reached while waiting for all backups 
with prefix " + f"'{prefix}' to finish deletion" + ) + + volumes = conn.block_storage.volumes() + for volume in volumes: + if volume.name.startswith(prefix): + try: + wait_for_resource("volume", volume.id) + except openstack.exceptions.ResourceNotFound: + # if the resource has vanished on + # its own in the meantime ignore it + continue + print(f"↳ deleting volume '{volume.id}' ...") + conn.block_storage.delete_volume(volume.id) + + +def main(): + parser = argparse.ArgumentParser( + description="SCS Volume Backup API Conformance Checker") + parser.add_argument( + "--os-cloud", type=str, + help="Name of the cloud from clouds.yaml, alternative " + "to the OS_CLOUD environment variable" + ) + parser.add_argument( + "--ask", + help="Ask for password interactively instead of reading it from the " + "clouds.yaml", + action="store_true" + ) + parser.add_argument( + "--debug", action="store_true", + help="Enable OpenStack SDK debug logging" + ) + parser.add_argument( + "--prefix", type=str, + default=DEFAULT_PREFIX, + help=f"OpenStack resource name prefix for all resources to be created " + f"and/or cleaned up by this script within the configured domains " + f"(default: '{DEFAULT_PREFIX}')" + ) + parser.add_argument( + "--timeout", type=int, + default=WAIT_TIMEOUT, + help=f"Timeout in seconds for operations waiting for resources to " + f"become available such as creating volumes and volume backups " + f"(default: '{WAIT_TIMEOUT}')" + ) + parser.add_argument( + "--cleanup-only", action="store_true", + help="Instead of executing tests, cleanup all resources " + "with the prefix specified via '--prefix' (or its default)" + ) + args = parser.parse_args() + openstack.enable_logging(debug=args.debug) + + # parse cloud name for lookup in clouds.yaml + cloud = os.environ.get("OS_CLOUD", None) + if args.os_cloud: + cloud = args.os_cloud + assert cloud, ( + "You need to have the OS_CLOUD environment variable set to your " + "cloud name or pass it via --os-cloud" + ) + conn = 
connect( + cloud, + password=getpass.getpass("Enter password: ") if args.ask else None + ) + if args.cleanup_only: + cleanup(conn, prefix=args.prefix, timeout=args.timeout) + else: + cleanup(conn, prefix=args.prefix, timeout=args.timeout) + test_backup(conn, prefix=args.prefix, timeout=args.timeout) + cleanup(conn, prefix=args.prefix, timeout=args.timeout) + + +if __name__ == "__main__": + main() diff --git a/Tests/kaas/kaas-sonobuoy-go-example-e2e-framework/Dockerfile b/Tests/kaas/kaas-sonobuoy-go-example-e2e-framework/Dockerfile index 2519a36b7..5c75f80d3 100644 --- a/Tests/kaas/kaas-sonobuoy-go-example-e2e-framework/Dockerfile +++ b/Tests/kaas/kaas-sonobuoy-go-example-e2e-framework/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17-buster as build +FROM golang:1.23 # Install kubectl # Note: Latest version may be found on: diff --git a/playbooks/adr_syntax.yaml b/playbooks/adr_syntax.yaml index a816fcab0..3d10a0292 100644 --- a/playbooks/adr_syntax.yaml +++ b/playbooks/adr_syntax.yaml @@ -3,15 +3,17 @@ hosts: all tasks: - name: Run ADR syntax check script - ansible.builtin.shell: | - python3 ~/Tests/chk_adrs.py ~/Standards + ansible.builtin.command: + cmd: python3 Tests/chk_adrs.py Standards + chdir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" register: result changed_when: true failed_when: result.rc != 0 - name: Run test script consistency check script - ansible.builtin.shell: | - python3 ~/Tests/iaas/flavor-naming/check_yaml.py ~/Tests/iaas + ansible.builtin.shell: + cmd: python3 Tests/iaas/flavor-naming/check_yaml.py Tests/iaas + chdir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" register: result changed_when: true failed_when: result.rc != 0 diff --git a/playbooks/compliance_check.yaml b/playbooks/compliance_check.yaml index ab10b6ce9..d74d6e4ef 100644 --- a/playbooks/compliance_check.yaml +++ b/playbooks/compliance_check.yaml @@ -3,23 +3,20 @@ hosts: all tasks: - name: Run compliance script - ansible.builtin.shell: > - python3 
~/Tests/scs-test-runner.py --config ~/Tests/config.toml --debug run --preset {{ preset }} --output report.yaml + # write report.yaml into the proper directory so it will be transferred back by base job + # -- this then works regardless of VM/pod + ansible.builtin.command: + cmd: > + python3 Tests/scs-test-runner.py --config Tests/config.toml --debug + run --preset {{ preset }} + --output "{{ ansible_user_dir }}/zuul-output/artifacts/report.yaml" + chdir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" changed_when: true - - name: Copy result YAML - ansible.builtin.synchronize: - dest: "{{ zuul.executor.log_root }}/report.yaml" - mode: pull - src: "report.yaml" - verify_host: true - owner: no - group: no - - name: Return artifact URL zuul_return: data: zuul: artifacts: - name: "report.yaml" - url: "report.yaml" + url: "artifacts/report.yaml" diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index a47102f13..66e81f356 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -4,20 +4,7 @@ roles: - role: ensure-pip # https://zuul-ci.org/docs/zuul-jobs/latest/python-roles.html#role-ensure-pip tasks: - - name: Copy ADRs on the node - ansible.builtin.copy: - src: "../Standards" - dest: "~/" - mode: 0500 - no_log: false - - - name: Copy Tests on the node - ansible.builtin.copy: - src: "../Tests" - dest: "~/" - mode: 0500 - no_log: false - - name: Install dependencies ansible.builtin.pip: - requirements: ~/Tests/requirements.txt + chdir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" + requirements: "Tests/requirements.txt" diff --git a/playbooks/pre_cloud.yaml b/playbooks/pre_cloud.yaml index e1ac04043..9a59fa410 100644 --- a/playbooks/pre_cloud.yaml +++ b/playbooks/pre_cloud.yaml @@ -2,7 +2,6 @@ - name: Prepare cloud config and ensure clean env hosts: all roles: - - role: ensure-pip # https://zuul-ci.org/docs/zuul-jobs/latest/python-roles.html#role-ensure-pip - role: bindep # https://zuul-ci.org/docs/zuul-jobs/latest/general-roles.html#role-bindep tasks: - 
name: Create cloud config dir @@ -21,7 +20,7 @@ - name: Create secrets dir ansible.builtin.file: - path: "~/Tests/.secret" + path: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/Tests/.secret" state: directory recurse: true mode: "0700" @@ -30,18 +29,19 @@ ansible.builtin.copy: # the secrets are usually stripped of whitespace, but the final newline is essential here content: "{{ clouds_conf.zuul_ci_signing_key + '\n' }}" - dest: "~/Tests/.secret/keyfile" + dest: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/Tests/.secret/keyfile" mode: "0600" no_log: true - name: Create basic_auth token file ansible.builtin.copy: content: "{{ clouds_conf.zuul_ci_basic_auth }}" - dest: "~/Tests/.secret/tokenfile" + dest: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/Tests/.secret/tokenfile" mode: "0600" no_log: true - name: Clean up any lingering resources from previous run - ansible.builtin.shell: > - python3 ~/Tests/scs-test-runner.py --config ~/Tests/config.toml --debug cleanup --preset {{ preset }} + ansible.builtin.command: + cmd: python3 Tests/scs-test-runner.py --config Tests/config.toml --debug cleanup --preset {{ preset }} + chdir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" changed_when: true