From ce2baeecf973034c2e00ebfc65bf48815d67f6d3 Mon Sep 17 00:00:00 2001 From: C Cheng <10414576+ccheng26@users.noreply.github.com> Date: Mon, 24 Jun 2024 12:31:44 -0400 Subject: [PATCH] DST-257: install infra temp (#3) Resolves #DST-257 Configured infra template --- .dockleconfig | 6 +- .../configure-aws-credentials/action.yml | 58 ++ .github/pull_request_template.md | 19 +- .github/workflows/README.md | 47 + .github/workflows/build-and-publish.yml | 76 ++ .github/workflows/cd-app.yml | 33 + .github/workflows/check-infra-auth.yml | 30 + .../workflows/ci-app-vulnerability-scans.yml | 26 + .github/workflows/ci-docs.yml | 20 + .github/workflows/ci-infra-service.yml | 50 + .github/workflows/ci-infra.yml | 95 ++ .github/workflows/database-migrations.yml | 44 + .github/workflows/deploy.yml | 43 + .github/workflows/markdownlint-config.json | 19 + .github/workflows/vulnerability-scans.yml | 127 +++ .gitignore | 2 - .grype.yml | 20 + .hadolint.yaml | 7 +- .template-version | 1 + .trivyignore | 9 + Makefile | 232 +++++ bin/account-ids-by-name | 23 + bin/check-database-roles | 53 + bin/check-github-actions-auth | 48 + bin/configure-monitoring-secret | 43 + bin/create-or-update-database-roles | 55 + bin/create-tfbackend | 56 + bin/current-account-alias | 4 + bin/current-account-config-name | 12 + bin/current-account-id | 4 + bin/current-region | 4 + bin/deploy-release | 30 + bin/is-image-published | 25 + bin/lint-markdown | 17 + bin/publish-release | 46 + bin/run-command | 220 ++++ bin/run-database-migrations | 62 ++ bin/set-up-current-account | 110 ++ bin/terraform-apply | 41 + bin/terraform-init | 27 + bin/terraform-init-and-apply | 23 + docs/code-reviews.md | 55 + docs/compliance.md | 29 + docs/decisions/index.md | 15 +- ...markdown-architectural-decision-records.md | 26 + docs/decisions/infra/0001-ci-cd-interface.md | 113 ++ ...se-custom-implementation-of-github-oidc.md | 38 + .../0003-manage-ecr-in-prod-account-module.md | 33 + ...kend-configs-into-separate-config-files.md | 36 + ...base-infrastructure-into-separate-layer.md | 66 ++ ...database-users-with-serverless-function.md | 87 ++ .../0007-database-migration-architecture.md | 92 ++ ...ig-from-tfvars-files-into-config-module.md | 30 + ...separate-app-infrastructure-into-layers.md | 40 + .../infra/0010-feature-flags-system-design.md | 124 +++ .../infra/0011-network-layer-design.md | 135 +++ docs/decisions/template.md | 2 +- docs/feature-flags.md | 31 + docs/infra/background-jobs.md | 16 + docs/infra/cloud-access-control.md | 7 + docs/infra/database-access-control.md | 24 + docs/infra/destroy-infrastructure.md | 59 ++ .../environment-variables-and-secrets.md | 62 ++ docs/infra/https-support.md | 40 + docs/infra/intro-to-terraform-workspaces.md | 59 ++ docs/infra/intro-to-terraform.md | 33 + docs/infra/making-infra-changes.md | 56 + docs/infra/module-architecture.md | 92 ++ docs/infra/module-dependencies.md | 92 ++ docs/infra/service-command-execution.md | 76 ++ docs/infra/set-up-app-build-repository.md | 40 + docs/infra/set-up-app-env.md | 64 ++ docs/infra/set-up-aws-account.md | 50 + docs/infra/set-up-custom-domains.md | 73 ++ docs/infra/set-up-database.md | 110 ++ docs/infra/set-up-infrastructure-tools.md | 108 ++ docs/infra/set-up-monitoring-alerts.md | 29 + docs/infra/set-up-network.md | 40 + docs/infra/set-up-public-internet-access.md | 32 + docs/infra/style-guide.md | 52 + docs/infra/upgrade-database.md | 84 ++ docs/infra/vulnerability-management.md | 39 + docs/releases.md | 21 + docs/system-architecture.md | 21 + infra/.gitignore | 28 + 
infra/README.md | 85 ++ infra/accounts/main.tf | 54 + infra/accounts/outputs.tf | 23 + infra/app/app-config/dev.tf | 20 + infra/app/app-config/env-config/database.tf | 17 + .../env-config/environment-variables.tf | 43 + .../app-config/env-config/file_upload_jobs.tf | 14 + infra/app/app-config/env-config/main.tf | 8 + infra/app/app-config/env-config/outputs.tf | 51 + infra/app/app-config/env-config/variables.tf | 89 ++ infra/app/app-config/feature-flags.tf | 3 + infra/app/app-config/main.tf | 47 + infra/app/app-config/outputs.tf | 49 + infra/app/app-config/prod.tf | 25 + infra/app/app-config/staging.tf | 18 + infra/app/build-repository/main.tf | 59 ++ infra/app/database/main.tf | 92 ++ infra/app/database/outputs.tf | 3 + infra/app/database/variables.tf | 4 + infra/app/service/image_tag.tf | 56 + infra/app/service/main.tf | 193 ++++ infra/app/service/outputs.tf | 24 + infra/app/service/secrets.tf | 16 + infra/app/service/variables.tf | 10 + infra/example.s3.tfbackend | 4 + infra/modules/auth-github-actions/README.md | 16 + infra/modules/auth-github-actions/main.tf | 59 ++ .../modules/auth-github-actions/variables.tf | 14 + .../container-image-repository/main.tf | 87 ++ .../container-image-repository/outputs.tf | 11 + .../container-image-repository/variables.tf | 15 + infra/modules/database/.gitignore | 1 + infra/modules/database/authentication.tf | 42 + infra/modules/database/backups.tf | 69 ++ infra/modules/database/main.tf | 97 ++ infra/modules/database/monitoring.tf | 28 + infra/modules/database/networking.tf | 54 + infra/modules/database/outputs.tf | 3 + infra/modules/database/role-manager.tf | 155 +++ infra/modules/database/role_manager/check.py | 63 ++ infra/modules/database/role_manager/db.py | 56 + infra/modules/database/role_manager/manage.py | 216 ++++ .../database/role_manager/requirements.txt | 1 + .../database/role_manager/role_manager.py | 11 + infra/modules/database/variables.tf | 67 ++ infra/modules/domain/certificates.tf | 59 ++ infra/modules/domain/main.tf | 10 + infra/modules/domain/outputs.tf | 9 + infra/modules/domain/query-logs.tf | 43 + infra/modules/domain/variables.tf | 51 + infra/modules/feature-flags/access-policy.tf | 21 + infra/modules/feature-flags/logs.tf | 47 + infra/modules/feature-flags/main.tf | 49 + infra/modules/feature-flags/outputs.tf | 9 + infra/modules/feature-flags/variables.tf | 9 + infra/modules/monitoring/main.tf | 85 ++ infra/modules/monitoring/outputs.tf | 3 + infra/modules/monitoring/variables.tf | 22 + infra/modules/network/main.tf | 40 + infra/modules/network/variables.tf | 32 + infra/modules/network/vpc-endpoints.tf | 95 ++ infra/modules/secret/main.tf | 26 + infra/modules/secret/outputs.tf | 3 + infra/modules/secret/variables.tf | 22 + infra/modules/service/access-control.tf | 87 ++ infra/modules/service/access-logs.tf | 107 ++ infra/modules/service/application-logs.tf | 15 + infra/modules/service/command-execution.tf | 31 + infra/modules/service/database-access.tf | 29 + infra/modules/service/dns.tf | 13 + infra/modules/service/jobs.tf | 106 ++ infra/modules/service/load-balancer.tf | 135 +++ infra/modules/service/main.tf | 143 +++ infra/modules/service/networking.tf | 88 ++ infra/modules/service/outputs.tf | 26 + infra/modules/service/task-scheduler-role.tf | 57 ++ infra/modules/service/variables.tf | 157 +++ infra/modules/storage/access-control.tf | 65 ++ infra/modules/storage/encryption.tf | 18 + infra/modules/storage/events.tf | 7 + infra/modules/storage/lifecycle.tf | 11 + infra/modules/storage/main.tf | 12 + 
infra/modules/storage/outputs.tf | 3 + infra/modules/storage/variables.tf | 4 + infra/modules/terraform-backend-s3/README.md | 7 + infra/modules/terraform-backend-s3/main.tf | 277 +++++ infra/modules/terraform-backend-s3/outputs.tf | 11 + .../modules/terraform-backend-s3/variables.tf | 4 + infra/networks/main.tf | 86 ++ infra/networks/outputs.tf | 11 + infra/networks/variables.tf | 4 + infra/project-config/README.md | 19 + infra/project-config/aws-services.tf | 35 + infra/project-config/main.tf | 20 + infra/project-config/networks.tf | 55 + infra/project-config/outputs.tf | 49 + infra/test/go.mod | 59 ++ infra/test/go.sum | 968 ++++++++++++++++++ infra/test/helpers.go | 20 + infra/test/infra_test.go | 152 +++ 185 files changed, 9792 insertions(+), 32 deletions(-) create mode 100644 .github/actions/configure-aws-credentials/action.yml create mode 100644 .github/workflows/README.md create mode 100644 .github/workflows/build-and-publish.yml create mode 100644 .github/workflows/cd-app.yml create mode 100644 .github/workflows/check-infra-auth.yml create mode 100644 .github/workflows/ci-app-vulnerability-scans.yml create mode 100644 .github/workflows/ci-docs.yml create mode 100644 .github/workflows/ci-infra-service.yml create mode 100644 .github/workflows/ci-infra.yml create mode 100644 .github/workflows/database-migrations.yml create mode 100644 .github/workflows/deploy.yml create mode 100644 .github/workflows/markdownlint-config.json create mode 100644 .github/workflows/vulnerability-scans.yml create mode 100644 .grype.yml create mode 100644 .template-version create mode 100644 .trivyignore create mode 100644 Makefile create mode 100755 bin/account-ids-by-name create mode 100755 bin/check-database-roles create mode 100755 bin/check-github-actions-auth create mode 100755 bin/configure-monitoring-secret create mode 100755 bin/create-or-update-database-roles create mode 100755 bin/create-tfbackend create mode 100755 bin/current-account-alias create mode 100755 bin/current-account-config-name create mode 100755 bin/current-account-id create mode 100755 bin/current-region create mode 100755 bin/deploy-release create mode 100755 bin/is-image-published create mode 100755 bin/lint-markdown create mode 100755 bin/publish-release create mode 100755 bin/run-command create mode 100755 bin/run-database-migrations create mode 100755 bin/set-up-current-account create mode 100755 bin/terraform-apply create mode 100755 bin/terraform-init create mode 100755 bin/terraform-init-and-apply create mode 100644 docs/code-reviews.md create mode 100644 docs/compliance.md create mode 100644 docs/decisions/infra/0000-use-markdown-architectural-decision-records.md create mode 100644 docs/decisions/infra/0001-ci-cd-interface.md create mode 100644 docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md create mode 100644 docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md create mode 100644 docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md create mode 100644 docs/decisions/infra/0005-separate-database-infrastructure-into-separate-layer.md create mode 100644 docs/decisions/infra/0006-provision-database-users-with-serverless-function.md create mode 100644 docs/decisions/infra/0007-database-migration-architecture.md create mode 100644 docs/decisions/infra/0008-consolidate-infra-config-from-tfvars-files-into-config-module.md create mode 100644 docs/decisions/infra/0009-separate-app-infrastructure-into-layers.md create mode 100644 
docs/decisions/infra/0010-feature-flags-system-design.md create mode 100644 docs/decisions/infra/0011-network-layer-design.md create mode 100644 docs/feature-flags.md create mode 100644 docs/infra/background-jobs.md create mode 100644 docs/infra/cloud-access-control.md create mode 100644 docs/infra/database-access-control.md create mode 100644 docs/infra/destroy-infrastructure.md create mode 100644 docs/infra/environment-variables-and-secrets.md create mode 100644 docs/infra/https-support.md create mode 100644 docs/infra/intro-to-terraform-workspaces.md create mode 100644 docs/infra/intro-to-terraform.md create mode 100644 docs/infra/making-infra-changes.md create mode 100644 docs/infra/module-architecture.md create mode 100644 docs/infra/module-dependencies.md create mode 100644 docs/infra/service-command-execution.md create mode 100644 docs/infra/set-up-app-build-repository.md create mode 100644 docs/infra/set-up-app-env.md create mode 100644 docs/infra/set-up-aws-account.md create mode 100644 docs/infra/set-up-custom-domains.md create mode 100644 docs/infra/set-up-database.md create mode 100644 docs/infra/set-up-infrastructure-tools.md create mode 100644 docs/infra/set-up-monitoring-alerts.md create mode 100644 docs/infra/set-up-network.md create mode 100644 docs/infra/set-up-public-internet-access.md create mode 100644 docs/infra/style-guide.md create mode 100644 docs/infra/upgrade-database.md create mode 100644 docs/infra/vulnerability-management.md create mode 100644 docs/releases.md create mode 100644 docs/system-architecture.md create mode 100644 infra/.gitignore create mode 100644 infra/README.md create mode 100644 infra/accounts/main.tf create mode 100644 infra/accounts/outputs.tf create mode 100644 infra/app/app-config/dev.tf create mode 100644 infra/app/app-config/env-config/database.tf create mode 100644 infra/app/app-config/env-config/environment-variables.tf create mode 100644 infra/app/app-config/env-config/file_upload_jobs.tf create mode 100644 infra/app/app-config/env-config/main.tf create mode 100644 infra/app/app-config/env-config/outputs.tf create mode 100644 infra/app/app-config/env-config/variables.tf create mode 100644 infra/app/app-config/feature-flags.tf create mode 100644 infra/app/app-config/main.tf create mode 100644 infra/app/app-config/outputs.tf create mode 100644 infra/app/app-config/prod.tf create mode 100644 infra/app/app-config/staging.tf create mode 100644 infra/app/build-repository/main.tf create mode 100644 infra/app/database/main.tf create mode 100644 infra/app/database/outputs.tf create mode 100644 infra/app/database/variables.tf create mode 100644 infra/app/service/image_tag.tf create mode 100644 infra/app/service/main.tf create mode 100644 infra/app/service/outputs.tf create mode 100644 infra/app/service/secrets.tf create mode 100644 infra/app/service/variables.tf create mode 100644 infra/example.s3.tfbackend create mode 100644 infra/modules/auth-github-actions/README.md create mode 100644 infra/modules/auth-github-actions/main.tf create mode 100644 infra/modules/auth-github-actions/variables.tf create mode 100644 infra/modules/container-image-repository/main.tf create mode 100644 infra/modules/container-image-repository/outputs.tf create mode 100644 infra/modules/container-image-repository/variables.tf create mode 100644 infra/modules/database/.gitignore create mode 100644 infra/modules/database/authentication.tf create mode 100644 infra/modules/database/backups.tf create mode 100644 infra/modules/database/main.tf create mode 100644 
infra/modules/database/monitoring.tf create mode 100644 infra/modules/database/networking.tf create mode 100644 infra/modules/database/outputs.tf create mode 100644 infra/modules/database/role-manager.tf create mode 100644 infra/modules/database/role_manager/check.py create mode 100644 infra/modules/database/role_manager/db.py create mode 100644 infra/modules/database/role_manager/manage.py create mode 100644 infra/modules/database/role_manager/requirements.txt create mode 100644 infra/modules/database/role_manager/role_manager.py create mode 100644 infra/modules/database/variables.tf create mode 100644 infra/modules/domain/certificates.tf create mode 100644 infra/modules/domain/main.tf create mode 100644 infra/modules/domain/outputs.tf create mode 100644 infra/modules/domain/query-logs.tf create mode 100644 infra/modules/domain/variables.tf create mode 100644 infra/modules/feature-flags/access-policy.tf create mode 100644 infra/modules/feature-flags/logs.tf create mode 100644 infra/modules/feature-flags/main.tf create mode 100644 infra/modules/feature-flags/outputs.tf create mode 100644 infra/modules/feature-flags/variables.tf create mode 100644 infra/modules/monitoring/main.tf create mode 100644 infra/modules/monitoring/outputs.tf create mode 100644 infra/modules/monitoring/variables.tf create mode 100644 infra/modules/network/main.tf create mode 100644 infra/modules/network/variables.tf create mode 100644 infra/modules/network/vpc-endpoints.tf create mode 100644 infra/modules/secret/main.tf create mode 100644 infra/modules/secret/outputs.tf create mode 100644 infra/modules/secret/variables.tf create mode 100644 infra/modules/service/access-control.tf create mode 100644 infra/modules/service/access-logs.tf create mode 100644 infra/modules/service/application-logs.tf create mode 100644 infra/modules/service/command-execution.tf create mode 100644 infra/modules/service/database-access.tf create mode 100644 infra/modules/service/dns.tf create mode 100644 infra/modules/service/jobs.tf create mode 100644 infra/modules/service/load-balancer.tf create mode 100644 infra/modules/service/main.tf create mode 100644 infra/modules/service/networking.tf create mode 100644 infra/modules/service/outputs.tf create mode 100644 infra/modules/service/task-scheduler-role.tf create mode 100644 infra/modules/service/variables.tf create mode 100644 infra/modules/storage/access-control.tf create mode 100644 infra/modules/storage/encryption.tf create mode 100644 infra/modules/storage/events.tf create mode 100644 infra/modules/storage/lifecycle.tf create mode 100644 infra/modules/storage/main.tf create mode 100644 infra/modules/storage/outputs.tf create mode 100644 infra/modules/storage/variables.tf create mode 100644 infra/modules/terraform-backend-s3/README.md create mode 100644 infra/modules/terraform-backend-s3/main.tf create mode 100644 infra/modules/terraform-backend-s3/outputs.tf create mode 100644 infra/modules/terraform-backend-s3/variables.tf create mode 100644 infra/networks/main.tf create mode 100644 infra/networks/outputs.tf create mode 100644 infra/networks/variables.tf create mode 100644 infra/project-config/README.md create mode 100644 infra/project-config/aws-services.tf create mode 100644 infra/project-config/main.tf create mode 100644 infra/project-config/networks.tf create mode 100644 infra/project-config/outputs.tf create mode 100644 infra/test/go.mod create mode 100644 infra/test/go.sum create mode 100644 infra/test/helpers.go create mode 100644 infra/test/infra_test.go diff --git 
a/.dockleconfig b/.dockleconfig
index 2c7e44a1..f7bc9767 100644
--- a/.dockleconfig
+++ b/.dockleconfig
@@ -1,8 +1,4 @@
 # This file allows you to specify a list of files that are acceptable to Dockle
 # To allow multiple files, use a list of names, example below. Make sure to remove the leading #
 # DOCKLE_ACCEPT_FILES="file1,path/to/file2,file3/path,etc"
-# https://github.com/goodwithtech/dockle#accept-suspicious-environment-variables--files--file-extensions
-# DOCKLE_ACCEPT_FILES="file1,path/to/file2,file3/path,etc"
-
-# The apiflask/settings file is a stub file that apiflask creates, and has no sensitive data in. We are ignoring it since it is unused
-DOCKLE_ACCEPT_FILES=app/.venv/lib/python3.12/site-packages/apiflask/settings.py
+# https://github.com/goodwithtech/dockle#accept-suspicious-environment-variables--files--file-extensions
\ No newline at end of file
diff --git a/.github/actions/configure-aws-credentials/action.yml b/.github/actions/configure-aws-credentials/action.yml
new file mode 100644
index 00000000..42f1e963
--- /dev/null
+++ b/.github/actions/configure-aws-credentials/action.yml
@@ -0,0 +1,58 @@
+name: 'Configure AWS Credentials'
+description: 'Configure AWS Credentials for a given application and |
+  environment so that the GitHub Actions workflow can access AWS resources. |
+  This is a wrapper around https://github.com/aws-actions/configure-aws-credentials |
+  that first determines the account, role, and region based on the |
+  account_names_by_environment configuration in app-config'
+inputs:
+  app_name:
+    description: 'Name of application folder under /infra'
+    required: true
+  environment:
+    description: 'Name of environment (dev, staging, prod) that AWS resources live in, or "shared" for resources that are shared across environments'
+    required: true
+runs:
+  using: "composite"
+  steps:
+    - name: Get AWS account authentication details (AWS account, IAM role, AWS region)
+      run: |
+        # Get AWS account authentication details (AWS account, IAM role, AWS region)
+        # associated with the application environment to figure out which AWS
+        # account to log into, which IAM role to assume, and which AWS region to use
+
+        echo "::group::AWS account authentication details"
+
+        terraform -chdir=infra/project-config init > /dev/null
+        terraform -chdir=infra/project-config apply -auto-approve > /dev/null
+        AWS_REGION=$(terraform -chdir=infra/project-config output -raw default_region)
+        echo "AWS_REGION=$AWS_REGION"
+        GITHUB_ACTIONS_ROLE_NAME=$(terraform -chdir=infra/project-config output -raw github_actions_role_name)
+        echo "GITHUB_ACTIONS_ROLE_NAME=$GITHUB_ACTIONS_ROLE_NAME"
+
+        terraform -chdir=infra/${{ inputs.app_name }}/app-config init > /dev/null
+        terraform -chdir=infra/${{ inputs.app_name }}/app-config apply -auto-approve > /dev/null
+        ACCOUNT_NAME=$(terraform -chdir=infra/${{ inputs.app_name }}/app-config output -json account_names_by_environment | jq -r .${{ inputs.environment }})
+        echo "ACCOUNT_NAME=$ACCOUNT_NAME"
+
+        # Get the account id associated with the account name by extracting the
+        # ACCOUNT_ID part of the tfbackend file name, which looks like
+        # <ACCOUNT_NAME>.<ACCOUNT_ID>.s3.tfbackend.
+        # The cut command splits the string with a period as the delimiter and
+        # extracts the second field.
+        ACCOUNT_ID=$(ls infra/accounts/$ACCOUNT_NAME.*.s3.tfbackend | cut -d.
-f2) + echo "ACCOUNT_ID=$ACCOUNT_ID" + + AWS_ROLE_TO_ASSUME=arn:aws:iam::$ACCOUNT_ID:role/$GITHUB_ACTIONS_ROLE_NAME + echo "AWS_ROLE_TO_ASSUME=$AWS_ROLE_TO_ASSUME" + + echo "::endgroup::" + + echo "Setting env vars AWS_ROLE_TO_ASSUME and AWS_REGION..." + echo "AWS_ROLE_TO_ASSUME=$AWS_ROLE_TO_ASSUME" >> "$GITHUB_ENV" + echo "AWS_REGION=$AWS_REGION" >> "$GITHUB_ENV" + shell: bash + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ env.AWS_REGION }} diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 6b9dd048..a7db834a 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,28 +1,15 @@ -- [ ] Update PR Title to follow this pattern: `[INTENT]: [MESSAGE]` - -> The title will become a one-line commit message in the git log, so be as concise and specific as possible -- refer to [How to Write a Git Commit Message](https://cbea.ms/git-commit/). Prepend [Conventional Commit](https://www.conventionalcommits.org/en/v1.0.0/#summary) intent (`fix:`, `feat:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`). - ## Ticket -Resolves #{TICKET NUMBER or URL or description} - +Resolves #{TICKET NUMBER OR URL} ## Changes > What was added, updated, or removed in this PR. -> Prefer small PRs; try to limit to 300 lines of code changes -> * https://blog.logrocket.com/using-stacked-pull-requests-in-github/ -> * https://opensource.com/article/18/6/anatomy-perfect-pull-request -> * https://developers.google.com/blockly/guides/modify/contribute/write_a_good_pr - ## Context for reviewers -> Background context, more in-depth details of the implementation, and anything else you'd like to call out or ask reviewers. -> Add comments to your code under the "Files Changed" tab to explain complex logic or code -> * https://betterprogramming.pub/how-to-make-a-perfect-pull-request-3578fb4c112 - +> Testing instructions, background context, more in-depth details of the implementation, and anything else you'd like to call out or ask reviewers. ## Testing -> Provide evidence that the code works as expected. Explain what was done for testing and the results of the test plan. Include screenshots, [GIF demos](https://www.cockos.com/licecap/), shell commands or output to help show the changes working as expected. ProTip: you can drag and drop or paste images into this textbox. \ No newline at end of file +> Provide evidence that the code works as expected. Explain what was done for testing and the results of the test plan. Include screenshots, [GIF demos](https://www.cockos.com/licecap/), shell commands or output to help show the changes working as expected. ProTip: you can drag and drop or paste images into this textbox. diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 00000000..d10928f7 --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,47 @@ +# CI/CD + +The CI/CD for this project uses [reusable Github Actions workflows](https://docs.github.com/en/actions/using-workflows/reusing-workflows). 
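+
+As an illustrative sketch (`app` here matches this repo's default application folder; substitute your own app name), a per-app caller workflow is a thin wrapper that invokes a reusable workflow via `uses:`:
+
+```yaml
+# Modeled on ci-app-vulnerability-scans.yml elsewhere in this PR
+name: CI Vulnerability Scans
+on:
+  pull_request:
+    paths:
+      - app/**
+jobs:
+  vulnerability-scans:
+    name: Vulnerability Scans
+    uses: ./.github/workflows/vulnerability-scans.yml
+    with:
+      app_name: "app"
+```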
+
+## 🧪 CI
+
+### Per app workflows
+
+Each app should have:
+
+- `ci-[app_name]`: must be created; should run linting and testing
+- `ci-[app_name]-vulnerability-scans`: calls `vulnerability-scans`
+  - Based on [ci-app-vulnerability-scans](https://github.com/navapbc/template-infra/blob/main/.github/workflows/ci-app-vulnerability-scans.yml)
+
+### App-agnostic workflows
+
+- [`ci-docs`](./ci-docs.yml): runs markdown linting on all markdown files in the repository
+  - Configure in [markdownlint-config.json](./markdownlint-config.json)
+- [`ci-infra`](./ci-infra.yml): runs infrastructure CI checks
+
+## 🚢 CD
+
+Each app should have:
+
+- `cd-[app_name]`: deploys an application
+  - Based on [`cd-app`](https://github.com/navapbc/template-infra/blob/main/.github/workflows/cd-app.yml)
+
+The CD workflow uses these reusable workflows:
+
+- [`deploy`](./deploy.yml): deploys an application
+- [`database-migrations`](./database-migrations.yml): runs database migrations for an application
+- [`build-and-publish`](./build-and-publish.yml): builds a container image for an application and publishes it to an image repository
+
+```mermaid
+graph TD
+    cd-app
+    deploy
+    database-migrations
+    build-and-publish
+
+    cd-app-->|calls|deploy-->|calls|database-migrations-->|calls|build-and-publish
+```
+
+## ⛑️ Helper workflows
+
+- [`check-infra-auth`](./check-infra-auth.yml): verifies that the project's GitHub repo is able to connect to AWS
+
diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml
new file mode 100644
index 00000000..2b3d6181
--- /dev/null
+++ b/.github/workflows/build-and-publish.yml
@@ -0,0 +1,76 @@
+name: Build and publish
+run-name: Build and publish ${{ inputs.app_name }}:${{ inputs.ref }}
+
+on:
+  workflow_call:
+    inputs:
+      app_name:
+        description: "name of application folder under infra directory"
+        required: true
+        type: string
+      ref:
+        description: The branch, tag or SHA to checkout. When checking out the repository that triggered a workflow, this defaults to the reference or SHA for that event. Otherwise, use the branch or tag that triggered the workflow run.
+        required: true
+        type: string
+  workflow_dispatch:
+    inputs:
+      app_name:
+        description: "name of application folder under infra directory"
+        required: true
+        type: string
+      ref:
+        description: The branch, tag or SHA to checkout. When checking out the repository that triggered a workflow, this defaults to the reference or SHA for that event. Otherwise, use the branch or tag that triggered the workflow run.
+        required: true
+        type: string
+
+jobs:
+  get-commit-hash:
+    name: Get commit hash
+    runs-on: ubuntu-latest
+    outputs:
+      commit_hash: ${{ steps.get-commit-hash.outputs.commit_hash }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ inputs.ref }}
+      - name: Get commit hash
+        id: get-commit-hash
+        run: |
+          COMMIT_HASH=$(git rev-parse ${{ inputs.ref }})
+          echo "Commit hash: $COMMIT_HASH"
+          echo "commit_hash=$COMMIT_HASH" >> "$GITHUB_OUTPUT"
+  build-and-publish:
+    name: Build and publish
+    runs-on: ubuntu-latest
+    needs: get-commit-hash
+    concurrency: ${{ github.workflow }}-${{ needs.get-commit-hash.outputs.commit_hash }}
+
+    permissions:
+      contents: read
+      id-token: write
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ inputs.ref }}
+
+      - name: Configure AWS credentials
+        uses: ./.github/actions/configure-aws-credentials
+        with:
+          app_name: ${{ inputs.app_name }}
+          environment: shared
+
+      - name: Check if image is already published
+        id: check-image-published
+        run: |
+          is_image_published=$(./bin/is-image-published "${{ inputs.app_name }}" "${{ inputs.ref }}")
+          echo "Is image published: $is_image_published"
+          echo "is_image_published=$is_image_published" >> "$GITHUB_OUTPUT"
+
+      - name: Build release
+        if: steps.check-image-published.outputs.is_image_published == 'false'
+        run: make APP_NAME=${{ inputs.app_name }} release-build
+
+      - name: Publish release
+        if: steps.check-image-published.outputs.is_image_published == 'false'
+        run: make APP_NAME=${{ inputs.app_name }} release-publish
diff --git a/.github/workflows/cd-app.yml b/.github/workflows/cd-app.yml
new file mode 100644
index 00000000..abd4660f
--- /dev/null
+++ b/.github/workflows/cd-app.yml
@@ -0,0 +1,33 @@
+name: Deploy App
+# Need to set a default value for when the workflow is triggered from a git push
+# which bypasses the default configuration for inputs
+run-name: Deploy ${{ github.ref_name }} to App ${{ inputs.environment || 'dev' }}
+
+on:
+  # !! Uncomment the following lines once you've set up the dev environment and are ready to turn on continuous deployment
+  # push:
+  #   branches:
+  #     - "main"
+  #   paths:
+  #     - "app/**"
+  #     - "bin/**"
+  #     - "infra/**"
+  workflow_dispatch:
+    inputs:
+      environment:
+        description: "target environment"
+        required: true
+        default: "dev"
+        type: choice
+        options:
+          - dev
+          - staging
+          - prod
+
+jobs:
+  deploy:
+    name: Deploy
+    uses: ./.github/workflows/deploy.yml
+    with:
+      app_name: "app"
+      environment: ${{ inputs.environment || 'dev' }}
diff --git a/.github/workflows/check-infra-auth.yml b/.github/workflows/check-infra-auth.yml
new file mode 100644
index 00000000..3e908148
--- /dev/null
+++ b/.github/workflows/check-infra-auth.yml
@@ -0,0 +1,30 @@
+name: Check GitHub Actions AWS Authentication
+
+on:
+  workflow_dispatch:
+    inputs:
+      aws_region:
+        description: AWS region
+        default: us-east-1
+        required: false
+      role_to_assume:
+        description: ARN of IAM role to assume
+        required: true
+
+permissions:
+  contents: read
+  id-token: write
+
+jobs:
+  caller-identity:
+    name: Check caller identity
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v3
+        with:
+          aws-region: ${{ inputs.aws_region }}
+          role-to-assume: ${{ inputs.role_to_assume }}
+      - run: aws sts get-caller-identity
diff --git a/.github/workflows/ci-app-vulnerability-scans.yml b/.github/workflows/ci-app-vulnerability-scans.yml
new file mode 100644
index 00000000..e20236f4
--- /dev/null
+++ b/.github/workflows/ci-app-vulnerability-scans.yml
@@ -0,0 +1,26 @@
+name: CI Vulnerability Scans
+
+on:
+  push:
+    branches:
+      - main
+    paths:
+      - app/**
+      - .grype.yml
+      - .hadolint.yaml
+      - .trivyignore
+      - .github/workflows/ci-app-vulnerability-scans.yml
+  pull_request:
+    paths:
+      - app/**
+      - .grype.yml
+      - .hadolint.yaml
+      - .trivyignore
+      - .github/workflows/ci-app-vulnerability-scans.yml
+
+jobs:
+  vulnerability-scans:
+    name: Vulnerability Scans
+    uses: ./.github/workflows/vulnerability-scans.yml
+    with:
+      app_name: "app"
diff --git a/.github/workflows/ci-docs.yml b/.github/workflows/ci-docs.yml
new file mode 100644
index 00000000..8ce042be
--- /dev/null
+++ b/.github/workflows/ci-docs.yml
@@ -0,0 +1,20 @@
+name: CI Documentation Checks
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+
+jobs:
+  lint-markdown:
+    name: Lint markdown
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      # This is the GitHub Actions-friendly port of the linter used in the Makefile.
+      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.15
+        with:
+          use-quiet-mode: 'yes' # errors only.
+          config-file: '.github/workflows/markdownlint-config.json'
diff --git a/.github/workflows/ci-infra-service.yml b/.github/workflows/ci-infra-service.yml
new file mode 100644
index 00000000..6e3bed7c
--- /dev/null
+++ b/.github/workflows/ci-infra-service.yml
@@ -0,0 +1,50 @@
+name: CI Infra Service Checks
+
+on:
+  # !! Uncomment to trigger automated infra tests once the dev environment is set up
+  # push:
+  #   branches:
+  #     - main
+  #   paths:
+  #     - infra/*/service/**
+  #     - infra/modules/**
+  #     - infra/test/**
+  #     - .github/workflows/ci-infra-service.yml
+  # pull_request:
+  #   paths:
+  #     - infra/*/service/**
+  #     - infra/modules/**
+  #     - infra/test/**
+  #     - .github/workflows/ci-infra-service.yml
+  workflow_dispatch:
+
+jobs:
+  infra-test-e2e:
+    name: Test service
+    runs-on: ubuntu-latest
+
+    permissions:
+      contents: read
+      id-token: write
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 1.8.3
+          terraform_wrapper: false
+
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ">=1.19.0"
+
+      - name: Configure AWS credentials
+        uses: ./.github/actions/configure-aws-credentials
+        with:
+          app_name: app
+          # Run infra CI on dev environment
+          environment: dev
+
+      - name: Run Terratest
+        run: make infra-test-service
diff --git a/.github/workflows/ci-infra.yml b/.github/workflows/ci-infra.yml
new file mode 100644
index 00000000..f7f9a4bf
--- /dev/null
+++ b/.github/workflows/ci-infra.yml
@@ -0,0 +1,95 @@
+name: CI Infra Checks
+
+on:
+  push:
+    branches:
+      - main
+    paths:
+      - bin/**
+      - infra/**
+      - .github/workflows/**
+  pull_request:
+    paths:
+      - bin/**
+      - infra/**
+      - .github/workflows/**
+
+jobs:
+  lint-github-actions:
+    # Lint GitHub Actions files using https://github.com/rhysd/actionlint
+    # This job configuration is largely copied from https://github.com/rhysd/actionlint/blob/main/docs/usage.md#use-actionlint-on-github-actions
+    name: Lint GitHub Actions workflows
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Download actionlint
+        id: get_actionlint
+        run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
+        shell: bash
+      - name: Check workflow files
+        run: ${{ steps.get_actionlint.outputs.executable }} -color
+        shell: bash
+  lint-scripts:
+    name: Lint scripts
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Shellcheck
+        run: make infra-lint-scripts
+  check-terraform-format:
+    name: Check Terraform format
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 1.8.3
+          terraform_wrapper: false
+      - name: Run infra-lint-terraform
+        run: |
+          echo "If this fails, run 'make infra-format'"
+          make infra-lint-terraform
+  validate-terraform:
+    name: Validate Terraform modules
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 1.8.3
+          terraform_wrapper: false
+      - name: Validate
+        run: make infra-validate-modules
+  check-compliance-with-checkov:
+    name: Check compliance with checkov
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+      - name: Run Checkov check
+        # Pin to a specific checkov version rather than running from checkov@master
+        # since checkov frequently adds new checks that can cause CI checks to fail unpredictably.
+        # There is currently no way to specify the checkov version to pin to (See https://github.com/bridgecrewio/checkov-action/issues/41)
+        # so we need to pin the version of the checkov-action, which indirectly pins the checkov version.
+        # In this case, checkov-action v12.2296.0 is mapped to checkov v2.3.194.
+ uses: bridgecrewio/checkov-action@v12.2296.0 + with: + directory: infra + framework: terraform + quiet: true # only displays failed checks + check-compliance-with-tfsec: + name: Check compliance with tfsec + runs-on: ubuntu-latest + + permissions: + contents: read + pull-requests: write + + steps: + - uses: actions/checkout@v3 + - name: Run tfsec check + uses: aquasecurity/tfsec-pr-commenter-action@v1.2.0 + with: + github_token: ${{ github.token }} diff --git a/.github/workflows/database-migrations.yml b/.github/workflows/database-migrations.yml new file mode 100644 index 00000000..dc61e264 --- /dev/null +++ b/.github/workflows/database-migrations.yml @@ -0,0 +1,44 @@ +name: Database migrations + +on: + workflow_call: + inputs: + app_name: + description: "name of application folder under infra directory" + required: true + type: string + environment: + description: "the name of the application environment (e.g. dev, staging, prod)" + required: true + type: string + +concurrency: database-migrations-${{ inputs.app_name }}-${{ inputs.environment }} + +jobs: + build-and-publish: + name: Build + uses: ./.github/workflows/build-and-publish.yml + with: + app_name: ${{ inputs.app_name }} + ref: ${{ github.ref }} + run-migrations: + name: Run migrations + runs-on: ubuntu-latest + needs: [build-and-publish] + + permissions: + contents: read + id-token: write + + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS credentials + uses: ./.github/actions/configure-aws-credentials + with: + app_name: ${{ inputs.app_name }} + environment: ${{ inputs.environment }} + + - name: Run migrations + run: | + make release-run-database-migrations APP_NAME=${{ inputs.app_name }} ENVIRONMENT=${{ inputs.environment }} diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 00000000..c5b4b6e0 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,43 @@ +name: Deploy + +on: + workflow_call: + inputs: + app_name: + description: "name of application folder under infra directory" + required: true + type: string + environment: + description: "the name of the application environment (e.g. 
dev, staging, prod)" + required: true + type: string + +concurrency: cd-${{inputs.app_name}}-${{ inputs.environment }} + +jobs: + # Don't need to call the build-and-publish workflow since the database-migrations + # workflow already calls it + database-migrations: + name: Database migrations + uses: ./.github/workflows/database-migrations.yml + with: + app_name: ${{ inputs.app_name }} + environment: ${{ inputs.environment }} + deploy: + name: Deploy + runs-on: ubuntu-latest + needs: [database-migrations] + permissions: + contents: read + id-token: write + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS credentials + uses: ./.github/actions/configure-aws-credentials + with: + app_name: ${{ inputs.app_name }} + environment: ${{ inputs.environment }} + + - name: Deploy release + run: make release-deploy APP_NAME=${{ inputs.app_name }} ENVIRONMENT=${{ inputs.environment }} diff --git a/.github/workflows/markdownlint-config.json b/.github/workflows/markdownlint-config.json new file mode 100644 index 00000000..fec55304 --- /dev/null +++ b/.github/workflows/markdownlint-config.json @@ -0,0 +1,19 @@ +{ + "ignorePatterns" : [ + { + "pattern": "0005-example.md" + }, + { + "pattern": "localhost" + }, + { + "pattern": "127.0.0.1" + } + ], + "replacementPatterns": [ + { + "pattern": "^/", + "replacement": "{{BASEURL}}/" + } + ] +} diff --git a/.github/workflows/vulnerability-scans.yml b/.github/workflows/vulnerability-scans.yml new file mode 100644 index 00000000..77f316ea --- /dev/null +++ b/.github/workflows/vulnerability-scans.yml @@ -0,0 +1,127 @@ +# GitHub Actions CI workflow that runs vulnerability scans on the application's Docker image +# to ensure images built are secure before they are deployed. + +# NOTE: The workflow isn't able to pass the docker image between jobs, so each builds the image. 
+# A future PR will pass the image between the scans to reduce overhead and increase speed.
+name: Vulnerability Scans
+
+on:
+  workflow_call:
+    inputs:
+      app_name:
+        description: "name of application folder under infra directory"
+        required: true
+        type: string
+
+jobs:
+  # hadolint-scan:
+  #   runs-on: ubuntu-latest
+
+  #   steps:
+  #     - uses: actions/checkout@v3
+
+  #     # Scans Dockerfile for any bad practices or issues
+  #     - name: Scan Dockerfile by hadolint
+  #       uses: hadolint/hadolint-action@v3.1.0
+  #       with:
+  #         dockerfile: ${{ inputs.app_name }}/Dockerfile
+  #         format: tty
+  #         failure-threshold: warning
+  #         output-file: hadolint-results.txt
+
+  #     - name: Save output to workflow summary
+  #       if: always() # Runs even if there is a failure
+  #       run: cat hadolint-results.txt >> "$GITHUB_STEP_SUMMARY"
+
+  trivy-scan:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Build and tag Docker image for scanning
+        id: build-image
+        run: |
+          make APP_NAME=${{ inputs.app_name }} release-build
+          IMAGE_NAME=$(make APP_NAME=${{ inputs.app_name }} release-image-name)
+          IMAGE_TAG=$(make release-image-tag)
+          echo "image=$IMAGE_NAME:$IMAGE_TAG" >> "$GITHUB_OUTPUT"
+
+      - name: Run Trivy vulnerability scan
+        uses: aquasecurity/trivy-action@master
+        with:
+          scan-type: image
+          image-ref: ${{ steps.build-image.outputs.image }}
+          format: table
+          exit-code: 1
+          ignore-unfixed: true
+          vuln-type: os
+          scanners: vuln,secret
+
+      - name: Save output to workflow summary
+        if: always() # Runs even if there is a failure
+        run: |
+          echo "View results in GitHub Action logs" >> "$GITHUB_STEP_SUMMARY"
+
+  # anchore-scan:
+  #   runs-on: ubuntu-latest
+
+  #   steps:
+  #     - uses: actions/checkout@v3
+
+  #     - name: Build and tag Docker image for scanning
+  #       id: build-image
+  #       run: |
+  #         make APP_NAME=${{ inputs.app_name }} release-build
+  #         IMAGE_NAME=$(make APP_NAME=${{ inputs.app_name }} release-image-name)
+  #         IMAGE_TAG=$(make release-image-tag)
+  #         echo "image=$IMAGE_NAME:$IMAGE_TAG" >> "$GITHUB_OUTPUT"
+
+  #     - name: Run Anchore vulnerability scan
+  #       uses: anchore/scan-action@v3
+  #       with:
+  #         image: ${{ steps.build-image.outputs.image }}
+  #         output-format: table
+
+  #     - name: Save output to workflow summary
+  #       if: always() # Runs even if there is a failure
+  #       run: echo "View results in GitHub Action logs" >> "$GITHUB_STEP_SUMMARY"
+
+  # dockle-scan:
+  #   runs-on: ubuntu-latest
+
+  #   steps:
+  #     - uses: actions/checkout@v3
+
+  #     - name: Build and tag Docker image for scanning
+  #       id: build-image
+  #       run: |
+  #         make APP_NAME=${{ inputs.app_name }} release-build
+  #         IMAGE_NAME=$(make APP_NAME=${{ inputs.app_name }} release-image-name)
+  #         IMAGE_TAG=$(make release-image-tag)
+  #         echo "image=$IMAGE_NAME:$IMAGE_TAG" >> "$GITHUB_OUTPUT"
+
+  #     # Dockle doesn't allow you to have an ignore file for the DOCKLE_ACCEPT_FILES
+  #     # variable, so this step saves the variable from .dockleconfig to the env for Dockle
+  #     - name: Set any acceptable Dockle files
+  #       run: |
+  #         if grep -q "^DOCKLE_ACCEPT_FILES=.*" .dockleconfig; then
+  #           grep -s '^DOCKLE_ACCEPT_FILES=' .dockleconfig >> "$GITHUB_ENV"
+  #         fi
+
+  #     - name: Run Dockle container linter
+  #       uses: erzz/dockle-action@v1.3.1
+  #       with:
+  #         image: ${{ steps.build-image.outputs.image }}
+  #         exit-code: "1"
+  #         failure-threshold: WARN
+  #         accept-filenames: ${{ env.DOCKLE_ACCEPT_FILES }}
+
+  #     - name: Save output to workflow summary
+  #       if: always() # Runs even if there is a failure
+  #       run: |
+  #         {
+  #           echo '```json'
+  #           cat dockle-report.json
+  #           echo '```'
+  #         } >> "$GITHUB_STEP_SUMMARY"
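+
+  # Illustrative sketch (not wired up in this PR): the header note says a future
+  # PR will pass the built image between scan jobs instead of rebuilding it in
+  # each job. One assumed approach is to save the image to a tarball in a build
+  # job and share it with the scan jobs via artifacts:
+  #
+  #   - name: Save image to tarball
+  #     run: docker save -o image.tar ${{ steps.build-image.outputs.image }}
+  #   - name: Upload image for scan jobs
+  #     uses: actions/upload-artifact@v3
+  #     with:
+  #       name: scan-image
+  #       path: image.tar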
diff --git a/.gitignore b/.gitignore
index c5024362..8fc4637e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,5 +18,3 @@
 # Python testing stuff
 *__pycache__*
-
-docker-compose.override.yml
\ No newline at end of file
diff --git a/.grype.yml b/.grype.yml
new file mode 100644
index 00000000..9fe419a0
--- /dev/null
+++ b/.grype.yml
@@ -0,0 +1,20 @@
+# List of vulnerabilities to ignore for the anchore scan
+# https://github.com/anchore/grype#specifying-matches-to-ignore
+# More info can be found in the docs/infra/vulnerability-management.md file
+
+# Please add safelists in the following format to make it easier when checking
+# Package/module name: URL to vulnerability for checking updates
+# Versions: URL to the version history
+# Dependencies: Name of any other packages or modules that are dependent on this version
+#   Link to the dependencies for ease of checking for updates
+# Issue: Why there is a finding and why it is here or has not been removed
+# Last checked: Date last checked in scans
+# - vulnerability: The-CVE-or-vuln-id # Remove comment at start of line
+
+ignore:
+  # These settings ignore any findings that fall into these categories
+  - fix-state: not-fixed
+  - fix-state: wont-fix
+  - fix-state: unknown
+  # https://github.com/anchore/grype/issues/1172
+  - vulnerability: GHSA-xqr8-7jwr-rhp7
diff --git a/.hadolint.yaml b/.hadolint.yaml
index 92af65c7..d552e354 100644
--- a/.hadolint.yaml
+++ b/.hadolint.yaml
@@ -1,11 +1,6 @@
 # List of settings and ignore or safelist findings for the hadolint scanner
+
 # For more information on any settings you can specify, see the actions' documentation here
 # https://github.com/hadolint/hadolint#configure
 failure-threshold: warning
 ignored: []
-override:
-  info:
-    # Casts the apt-get install = finding as info
-    # We have this set since there is no way to specify version for
-    # build-essentials in the Dockerfile
-    - DL3008
diff --git a/.template-version b/.template-version
new file mode 100644
index 00000000..cdff4627
--- /dev/null
+++ b/.template-version
@@ -0,0 +1 @@
+7087c56dbe327b8d66fa8237e714eff5d9c3b515
diff --git a/.trivyignore b/.trivyignore
new file mode 100644
index 00000000..707b3e98
--- /dev/null
+++ b/.trivyignore
@@ -0,0 +1,9 @@
+# List of vulnerabilities to ignore for the trivy scan
+# Please add safelists in the following format to make it easier when checking
+# Package/module name: URL to vulnerability for checking updates
+# Versions: URL to the version history
+# Dependencies: Name of any other packages or modules that are dependent on this version
+#   Link to the dependencies for ease of checking for updates
+# Issue: Why there is a finding and why it is here or has not been removed
+# Last checked: Date last checked in scans
+#The-CVE-or-vuln-id # Remove comment at start of line
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..071a9e98
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,232 @@
+PROJECT_ROOT ?= $(notdir $(PWD))
+
+# Use `=` instead of `:=` so that we only execute `./bin/current-account-alias` when needed
+# See https://www.gnu.org/software/make/manual/html_node/Flavors.html#Flavors
+CURRENT_ACCOUNT_ALIAS = `./bin/current-account-alias`
+
+CURRENT_ACCOUNT_ID = `./bin/current-account-id`
+
+# Get the list of reusable terraform modules by listing all the modules
+# in infra/modules and then stripping out the "infra/modules/" prefix
+MODULES := $(notdir $(wildcard infra/modules/*))
+
+# Check that given variables are set and all have non-empty values,
+# die with an error
otherwise. +# +# Params: +# 1. Variable name(s) to test. +# 2. (optional) Error message to print. +# Based off of https://stackoverflow.com/questions/10858261/how-to-abort-makefile-if-variable-not-set +check_defined = \ + $(strip $(foreach 1,$1, \ + $(call __check_defined,$1,$(strip $(value 2))))) +__check_defined = \ + $(if $(value $1),, \ + $(error Undefined $1$(if $2, ($2))$(if $(value @), \ + required by target '$@'))) + + +.PHONY : \ + help \ + infra-check-app-database-roles \ + infra-check-compliance-checkov \ + infra-check-compliance-tfsec \ + infra-check-compliance \ + infra-configure-app-build-repository \ + infra-configure-app-database \ + infra-configure-app-service \ + infra-configure-monitoring-secrets \ + infra-configure-network \ + infra-format \ + infra-lint \ + infra-lint-scripts \ + infra-lint-terraform \ + infra-lint-workflows \ + infra-set-up-account \ + infra-test-service \ + infra-update-app-build-repository \ + infra-update-app-database-roles \ + infra-update-app-database \ + infra-update-app-service \ + infra-update-current-account \ + infra-update-network \ + infra-validate-modules \ + lint-markdown \ + release-build \ + release-deploy \ + release-image-name \ + release-image-tag \ + release-publish \ + release-run-database-migrations + + + +infra-set-up-account: ## Configure and create resources for current AWS profile and save tfbackend file to infra/accounts/$ACCOUNT_NAME.ACCOUNT_ID.s3.tfbackend + @:$(call check_defined, ACCOUNT_NAME, human readable name for account e.g. "prod" or the AWS account alias) + ./bin/set-up-current-account $(ACCOUNT_NAME) + +infra-configure-network: ## Configure network $NETWORK_NAME + @:$(call check_defined, NETWORK_NAME, the name of the network in /infra/networks) + ./bin/create-tfbackend infra/networks $(NETWORK_NAME) + +infra-configure-app-build-repository: ## Configure infra/$APP_NAME/build-repository tfbackend and tfvars files + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + ./bin/create-tfbackend "infra/$(APP_NAME)/build-repository" shared + +infra-configure-app-database: ## Configure infra/$APP_NAME/database module's tfbackend and tfvars files for $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/create-tfbackend "infra/$(APP_NAME)/database" "$(ENVIRONMENT)" + +infra-configure-monitoring-secrets: ## Set $APP_NAME's incident management service integration URL for $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + @:$(call check_defined, URL, incident management service (PagerDuty or VictorOps) integration URL) + ./bin/configure-monitoring-secret $(APP_NAME) $(ENVIRONMENT) $(URL) + +infra-configure-app-service: ## Configure infra/$APP_NAME/service module's tfbackend and tfvars files for $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. 
"prod" or "staging") + ./bin/create-tfbackend "infra/$(APP_NAME)/service" "$(ENVIRONMENT)" + +infra-update-current-account: ## Update infra resources for current AWS profile + ./bin/terraform-init-and-apply infra/accounts `./bin/current-account-config-name` + +infra-update-network: ## Update network + @:$(call check_defined, NETWORK_NAME, the name of the network in /infra/networks) + terraform -chdir="infra/networks" init -input=false -reconfigure -backend-config="$(NETWORK_NAME).s3.tfbackend" + terraform -chdir="infra/networks" apply -var="network_name=$(NETWORK_NAME)" + +infra-update-app-build-repository: ## Create or update $APP_NAME's build repository + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + ./bin/terraform-init-and-apply infra/$(APP_NAME)/build-repository shared + +infra-update-app-database: ## Create or update $APP_NAME's database module for $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + terraform -chdir="infra/$(APP_NAME)/database" init -input=false -reconfigure -backend-config="$(ENVIRONMENT).s3.tfbackend" + terraform -chdir="infra/$(APP_NAME)/database" apply -var="environment_name=$(ENVIRONMENT)" + +infra-update-app-database-roles: ## Create or update database roles and schemas for $APP_NAME's database in $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/create-or-update-database-roles $(APP_NAME) $(ENVIRONMENT) + +infra-update-app-service: ## Create or update $APP_NAME's web service module + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + terraform -chdir="infra/$(APP_NAME)/service" init -input=false -reconfigure -backend-config="$(ENVIRONMENT).s3.tfbackend" + terraform -chdir="infra/$(APP_NAME)/service" apply -var="environment_name=$(ENVIRONMENT)" + +# The prerequisite for this rule is obtained by +# prefixing each module with the string "infra-validate-module-" +infra-validate-modules: ## Run terraform validate on reusable child modules +infra-validate-modules: $(patsubst %, infra-validate-module-%, $(MODULES)) + +infra-validate-module-%: + @echo "Validate library module: $*" + terraform -chdir=infra/modules/$* init -backend=false + terraform -chdir=infra/modules/$* validate + +infra-check-app-database-roles: ## Check that app database roles have been configured properly + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. 
"prod" or "staging") + ./bin/check-database-roles $(APP_NAME) $(ENVIRONMENT) + +infra-check-compliance: ## Run compliance checks +infra-check-compliance: infra-check-compliance-checkov infra-check-compliance-tfsec + +infra-check-compliance-checkov: ## Run checkov compliance checks + checkov --directory infra + +infra-check-compliance-tfsec: ## Run tfsec compliance checks + tfsec infra + +infra-lint: ## Lint infra code +infra-lint: lint-markdown infra-lint-scripts infra-lint-terraform infra-lint-workflows + +infra-lint-scripts: ## Lint shell scripts + shellcheck bin/** + +infra-lint-terraform: ## Lint Terraform code + terraform fmt -recursive -check infra + +infra-lint-workflows: ## Lint GitHub actions + actionlint + +infra-format: ## Format infra code + terraform fmt -recursive infra + +infra-test-service: ## Run service layer infra test suite + cd infra/test && go test -run TestService -v -timeout 30m + +lint-markdown: ## Lint Markdown docs for broken links + ./bin/lint-markdown + +######################## +## Release Management ## +######################## + +# Include project name in image name so that image name +# does not conflict with other images during local development +IMAGE_NAME := $(PROJECT_ROOT)-$(APP_NAME) + +GIT_REPO_AVAILABLE := $(shell git rev-parse --is-inside-work-tree 2>/dev/null) + +# Generate a unique tag based solely on the git hash. +# This will be the identifier used for deployment via terraform. +ifdef GIT_REPO_AVAILABLE +IMAGE_TAG := $(shell git rev-parse HEAD) +else +IMAGE_TAG := "unknown-dev.$(DATE)" +endif + +# Generate an informational tag so we can see where every image comes from. +DATE := $(shell date -u '+%Y%m%d.%H%M%S') +INFO_TAG := $(DATE).$(USER) + +release-build: ## Build release for $APP_NAME and tag it with current git hash + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + cd $(APP_NAME) && $(MAKE) release-build \ + OPTS="--tag $(IMAGE_NAME):latest --tag $(IMAGE_NAME):$(IMAGE_TAG)" + +release-publish: ## Publish release to $APP_NAME's build repository + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + ./bin/publish-release $(APP_NAME) $(IMAGE_NAME) $(IMAGE_TAG) + +release-run-database-migrations: ## Run $APP_NAME's database migrations in $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "dev") + ./bin/run-database-migrations $(APP_NAME) $(IMAGE_TAG) $(ENVIRONMENT) + +release-deploy: ## Deploy release to $APP_NAME's web service in $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. 
"prod" or "dev") + ./bin/deploy-release $(APP_NAME) $(IMAGE_TAG) $(ENVIRONMENT) + +release-image-name: ## Prints the image name of the release image + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @echo $(IMAGE_NAME) + +release-image-tag: ## Prints the image tag of the release image + @echo $(IMAGE_TAG) + +######################## +## Scripts and Helper ## +######################## + +help: ## Prints the help documentation and info about each command + @grep -Eh '^[[:print:]]+:.*?##' $(MAKEFILE_LIST) | \ + sort -d | \ + awk -F':.*?## ' '{printf "\033[36m%s\033[0m\t%s\n", $$1, $$2}' | \ + column -t -s "$$(printf '\t')" + @echo "" + @echo "APP_NAME=$(APP_NAME)" + @echo "ENVIRONMENT=$(ENVIRONMENT)" + @echo "IMAGE_NAME=$(IMAGE_NAME)" + @echo "IMAGE_TAG=$(IMAGE_TAG)" + @echo "INFO_TAG=$(INFO_TAG)" + @echo "GIT_REPO_AVAILABLE=$(GIT_REPO_AVAILABLE)" + @echo "SHELL=$(SHELL)" + @echo "MAKE_VERSION=$(MAKE_VERSION)" + @echo "MODULES=$(MODULES)" diff --git a/bin/account-ids-by-name b/bin/account-ids-by-name new file mode 100755 index 00000000..fcfd65bd --- /dev/null +++ b/bin/account-ids-by-name @@ -0,0 +1,23 @@ +#!/bin/bash +# Prints a JSON dictionary that maps account names to account ids for the list +# of accounts given by the terraform backend files of the form +# ..s3.tfbackend in the infra/accounts directory. +set -euo pipefail + +# We use script dir to make this script agnostic to where it's called from. +# This is needed since this script its called from infra//build-repository +# in an external data source +script_dir=$(dirname "$0") + +key_value_pairs=() +backend_config_file_paths=$(ls -1 "$script_dir"/../infra/accounts/*.*.s3.tfbackend) + +for backend_config_file_path in $backend_config_file_paths; do + backend_config_file=$(basename "$backend_config_file_path") + backend_config_name="${backend_config_file/.s3.tfbackend/}" + IFS='.' read -r account_name account_id <<< "$backend_config_name" + key_value_pairs+=("\"$account_name\":\"$account_id\"") +done + +IFS="," +echo "{${key_value_pairs[*]}}" diff --git a/bin/check-database-roles b/bin/check-database-roles new file mode 100755 index 00000000..d38e90fd --- /dev/null +++ b/bin/check-database-roles @@ -0,0 +1,53 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Script that invokes the database role-manager AWS Lambda function to check +# that the Postgres users were configured properly. +# +# Positional parameters: +# app_name (required) โ€“ the name of subdirectory of /infra that holds the +# application's infrastructure code. +# environment (required) - the name of the application environment (e.g. 
+#     staging, prod)
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+app_name=$1
+environment=$2
+
+terraform -chdir="infra/$app_name/app-config" init > /dev/null
+terraform -chdir="infra/$app_name/app-config" apply -auto-approve > /dev/null
+./bin/terraform-init "infra/$app_name/database" "$environment"
+db_role_manager_function_name=$(terraform -chdir="infra/$app_name/database" output -raw role_manager_function_name)
+db_config=$(terraform -chdir="infra/$app_name/app-config" output -json environment_configs | jq -r ".$environment.database_config")
+payload="{\"action\":\"check\",\"config\":$db_config}"
+
+echo "======================="
+echo "Checking database roles"
+echo "======================="
+echo "Input parameters"
+echo "  app_name=$app_name"
+echo "  environment=$environment"
+echo
+echo "Invoking Lambda function: $db_role_manager_function_name"
+echo "  Payload: $payload"
+echo
+cli_response=$(aws lambda invoke \
+  --function-name "$db_role_manager_function_name" \
+  --no-cli-pager \
+  --log-type Tail \
+  --payload "$(echo -n "$payload" | base64)" \
+  --output json \
+  response.json)
+
+# Print logs out (they are returned base64 encoded)
+echo "$cli_response" | jq -r '.LogResult' | base64 --decode
+echo
+echo "Lambda function response:"
+cat response.json
+rm response.json
+
+# Exit with nonzero status if function failed
+function_error=$(echo "$cli_response" | jq -r '.FunctionError')
+if [ "$function_error" != "null" ]; then
+  exit 1
+fi
diff --git a/bin/check-github-actions-auth b/bin/check-github-actions-auth
new file mode 100755
index 00000000..0153d18a
--- /dev/null
+++ b/bin/check-github-actions-auth
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -euo pipefail
+
+github_actions_role=$1
+
+# This is used later to determine the run id of the workflow run
+# See comment below about "Get workflow run id"
+prev_run_create_time=$(gh run list --workflow check-infra-auth.yml --limit 1 --json createdAt --jq ".[].createdAt")
+
+echo "Run check-infra-auth workflow with role_to_assume=$github_actions_role"
+gh workflow run check-infra-auth.yml --field role_to_assume="$github_actions_role"
+
+#########################
+## Get workflow run id ##
+#########################
+
+echo "Get workflow run id"
+# The following commands aim to get the workflow run id of the run that was
+# just triggered by the previous workflow dispatch event. There's currently no
+# simple and reliable way to do this, so for now we are going to accept that
+# there is a race condition.
+#
+# The current implementation involves getting the create time of the previous
+# run, then continuously checking the list of workflow runs until we see a
+# newly created run, and finally getting the id of this new run.
+#
+# References:
+# * This Stack Overflow question suggests a complicated, overengineered approach:
+#   https://stackoverflow.com/questions/69479400/get-run-id-after-triggering-a-github-workflow-dispatch-event
+# * This GitHub community discussion also requests this feature:
+#   https://github.com/orgs/community/discussions/17389
+
+echo "Previous workflow run created at $prev_run_create_time"
+echo "Check workflow run create time until we find a newer workflow run"
+while : ; do
+  echo -n "."
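+  # A run whose createdAt is strictly newer than prev_run_create_time is
+  # assumed to be the run we just dispatched above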
+  run_create_time=$(gh run list --workflow check-infra-auth.yml --limit 1 --json createdAt --jq ".[].createdAt")
+  [[ $run_create_time > $prev_run_create_time ]] && break
+done
+echo "Found newer workflow run created at $run_create_time"
+
+echo "Get id of workflow run"
+workflow_run_id=$(gh run list --workflow check-infra-auth.yml --limit 1 --json databaseId --jq ".[].databaseId")
+echo "Workflow run id: $workflow_run_id"
+
+echo "Watch workflow run until it exits"
+# --exit-status causes command to exit with non-zero status if run fails
+gh run watch "$workflow_run_id" --exit-status
diff --git a/bin/configure-monitoring-secret b/bin/configure-monitoring-secret
new file mode 100755
index 00000000..0ce6f280
--- /dev/null
+++ b/bin/configure-monitoring-secret
@@ -0,0 +1,43 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# This script creates an SSM parameter that stores the integration URL for
+# incident management services. It creates a new SSM parameter or updates an
+# existing one.
+#
+# Positional parameters:
+#   app_name (required) - the name of subdirectory of /infra that holds the
+#     application's infrastructure code.
+#   environment (required) - the name of the application environment (e.g. dev,
+#     staging, prod)
+#   integration_endpoint_url (required) - the url for the integration endpoint
+#     for external incident management services (e.g. PagerDuty, Splunk On-Call)
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+app_name=$1
+environment=$2
+integration_endpoint_url=$3
+
+terraform -chdir="infra/$app_name/app-config" init > /dev/null
+terraform -chdir="infra/$app_name/app-config" apply -auto-approve > /dev/null
+
+has_incident_management_service=$(terraform -chdir="infra/$app_name/app-config" output -raw has_incident_management_service)
+if [ "$has_incident_management_service" = "false" ]; then
+  echo "Application does not have incident management service, no secret to create"
+  exit 0
+fi
+
+secret_name=$(terraform -chdir="infra/$app_name/app-config" output -json environment_configs | jq -r ".$environment.incident_management_service_integration.integration_url_param_name")
+
+echo "====================="
+echo "Setting up SSM secret"
+echo "====================="
+echo "app_name=$app_name"
+echo "environment=$environment"
+echo "integration_endpoint_url=$integration_endpoint_url"
+echo
+echo "Creating SSM secret: $secret_name"
+
+aws ssm put-parameter \
+  --name "$secret_name" \
+  --value "$integration_endpoint_url" \
+  --type SecureString \
+  --overwrite
diff --git a/bin/create-or-update-database-roles b/bin/create-or-update-database-roles
new file mode 100755
index 00000000..75626ca4
--- /dev/null
+++ b/bin/create-or-update-database-roles
@@ -0,0 +1,55 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# Script that invokes the database role-manager AWS Lambda function to create
+# or update the Postgres user roles for a particular environment.
+# The Lambda function is created by the infra/app/database root module and is
+# defined in the infra/modules/database child module.
+#
+# Positional parameters:
+#   app_name (required) - the name of subdirectory of /infra that holds the
+#     application's infrastructure code.
+#   environment (required) - the name of the application environment (e.g. dev,
+#     staging, prod)
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+app_name=$1
+environment=$2
+
+terraform -chdir="infra/$app_name/app-config" init > /dev/null
+terraform -chdir="infra/$app_name/app-config" apply -auto-approve > /dev/null
+./bin/terraform-init "infra/$app_name/database" "$environment"
+db_role_manager_function_name=$(terraform -chdir="infra/$app_name/database" output -raw role_manager_function_name)
+db_config=$(terraform -chdir="infra/$app_name/app-config" output -json environment_configs | jq -r ".$environment.database_config")
+payload="{\"action\":\"manage\",\"config\":$db_config}"
+
+echo "================================"
+echo "Creating/updating database users"
+echo "================================"
+echo "Input parameters"
+echo "  app_name=$app_name"
+echo "  environment=$environment"
+echo
+echo "Invoking Lambda function: $db_role_manager_function_name"
+echo "  Payload: $payload"
+echo
+cli_response=$(aws lambda invoke \
+  --function-name "$db_role_manager_function_name" \
+  --no-cli-pager \
+  --log-type Tail \
+  --payload "$(echo -n "$payload" | base64)" \
+  --output json \
+  response.json)
+
+# Print logs out (they are returned base64 encoded)
+echo "$cli_response" | jq -r '.LogResult' | base64 --decode
+echo
+echo "Lambda function response:"
+cat response.json
+rm response.json
+
+# Exit with nonzero status if function failed
+function_error=$(echo "$cli_response" | jq -r '.FunctionError')
+if [ "$function_error" != "null" ]; then
+  exit 1
+fi
diff --git a/bin/create-tfbackend b/bin/create-tfbackend
new file mode 100755
index 00000000..e12e8943
--- /dev/null
+++ b/bin/create-tfbackend
@@ -0,0 +1,56 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# This script creates a terraform backend config file for a terraform module.
+# It is not meant to be used directly. Instead, it is called by other scripts
+# that set up and configure the infra/accounts module and the infra/app/
+# modules, such as infra/app/build-repository and infra/app/service.
+#
+# Positional parameters:
+#   module_dir (required) - the directory of the root module that will be configured
+#   backend_config_name (required) - the name of the backend config that will be
+#     created. For environment-specific configs, the backend_config_name will be
+#     the same as environment. For shared configs, the backend_config_name will
+#     be "shared".
+#   tf_state_key (optional) - the S3 object key of the tfstate file in the S3
+#     bucket. Defaults to [module_dir]/[backend_config_name].tfstate
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+module_dir=$1
+backend_config_name=$2
+tf_state_key="${3:-$module_dir/$backend_config_name.tfstate}"
+
+# The local tfbackend config file that will store the terraform backend config
+backend_config_file="$module_dir/$backend_config_name.s3.tfbackend"
+
+# Get the name of the S3 bucket that was created to store the tf state
+# and the name of the DynamoDB table that was created for tf state locks.
+# This will be used to configure the S3 backends in all the application
+# modules
+tf_state_bucket_name=$(terraform -chdir=infra/accounts output -raw tf_state_bucket_name)
+tf_locks_table_name=$(terraform -chdir=infra/accounts output -raw tf_locks_table_name)
+region=$(terraform -chdir=infra/accounts output -raw region)
+
+echo "===================================="
+echo "Create terraform backend config file"
+echo "===================================="
+echo "Input parameters"
+echo "  module_dir=$module_dir"
+echo "  backend_config_name=$backend_config_name"
+echo
+
+# Create output file from example file
+cp infra/example.s3.tfbackend "$backend_config_file"
+
+# Replace the placeholder values
+# (the placeholder names must match the ones used in infra/example.s3.tfbackend)
+sed -i.bak "s/<TF_STATE_BUCKET_NAME>/$tf_state_bucket_name/g" "$backend_config_file"
+sed -i.bak "s|<TF_STATE_KEY>|$tf_state_key|g" "$backend_config_file"
+sed -i.bak "s/<TF_LOCKS_TABLE_NAME>/$tf_locks_table_name/g" "$backend_config_file"
+sed -i.bak "s/<REGION>/$region/g" "$backend_config_file"
+
+# Remove the backup file created by sed
+rm "$backend_config_file.bak"
+
+echo "Created file: $backend_config_file"
+echo "------------------ file contents ------------------"
+cat "$backend_config_file"
+echo "----------------------- end -----------------------"
diff --git a/bin/current-account-alias b/bin/current-account-alias
new file mode 100755
index 00000000..f7003fc3
--- /dev/null
+++ b/bin/current-account-alias
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Print the current AWS account alias
+set -euo pipefail
+echo -n "$(aws iam list-account-aliases --query "AccountAliases" --max-items 1 --output text)"
diff --git a/bin/current-account-config-name b/bin/current-account-config-name
new file mode 100755
index 00000000..83c86e64
--- /dev/null
+++ b/bin/current-account-config-name
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Print the config name for the current AWS account
+# Do this by getting the current account and searching for a file in
+# infra/accounts that matches "<account name>.<account id>.s3.tfbackend".
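+# (e.g. a hypothetical file "prod.123456789012.s3.tfbackend" would yield the
+# config name "prod.123456789012")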
+# The config name is "<account name>.<account id>".
+set -euo pipefail
+
+current_account_id=$(./bin/current-account-id)
+backend_config_file_path=$(ls -1 infra/accounts/*."$current_account_id".s3.tfbackend)
+backend_config_file=$(basename "$backend_config_file_path")
+backend_config_name="${backend_config_file/.s3.tfbackend/}"
+echo "$backend_config_name"
diff --git a/bin/current-account-id b/bin/current-account-id
new file mode 100755
index 00000000..92f368bf
--- /dev/null
+++ b/bin/current-account-id
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Print the current AWS account id
+set -euo pipefail
+echo -n "$(aws sts get-caller-identity --query "Account" --output text)"
diff --git a/bin/current-region b/bin/current-region
new file mode 100755
index 00000000..c25b2c11
--- /dev/null
+++ b/bin/current-region
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Print the current AWS region
+set -euo pipefail
+echo -n "$(aws configure list | grep region | awk '{print $2}')"
diff --git a/bin/deploy-release b/bin/deploy-release
new file mode 100755
index 00000000..142e9075
--- /dev/null
+++ b/bin/deploy-release
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -euo pipefail
+
+app_name=$1
+image_tag=$2
+environment=$3
+
+echo "--------------"
+echo "Deploy release"
+echo "--------------"
+echo "Input parameters:"
+echo "  app_name=$app_name"
+echo "  image_tag=$image_tag"
+echo "  environment=$environment"
+echo
+
+# Update task definition and update service to use new task definition
+
+echo "::group::Starting $app_name deploy of $image_tag to $environment"
+TF_CLI_ARGS_apply="-input=false -auto-approve -var=image_tag=$image_tag" make infra-update-app-service APP_NAME="$app_name" ENVIRONMENT="$environment"
+echo "::endgroup::"
+
+# Wait for the service to become stable
+
+cluster_name=$(terraform -chdir="infra/$app_name/service" output -raw service_cluster_name)
+service_name=$(terraform -chdir="infra/$app_name/service" output -raw service_name)
+echo "Wait for service $service_name to become stable"
+aws ecs wait services-stable --cluster "$cluster_name" --services "$service_name"
+
+echo "Completed $app_name deploy of $image_tag to $environment"
diff --git a/bin/is-image-published b/bin/is-image-published
new file mode 100755
index 00000000..f4a36f94
--- /dev/null
+++ b/bin/is-image-published
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Checks if an image tag has already been published to the container repository
+# Prints "true" if so, "false" otherwise
+
+set -euo pipefail
+
+app_name=$1
+git_ref=$2
+
+# Get commit hash
+image_tag=$(git rev-parse "$git_ref")
+
+# Need to init module when running in CD since GitHub actions does a fresh checkout of repo
+terraform -chdir="infra/$app_name/app-config" init > /dev/null
+terraform -chdir="infra/$app_name/app-config" apply -auto-approve > /dev/null
+image_repository_name=$(terraform -chdir="infra/$app_name/app-config" output -raw image_repository_name)
+region=$(./bin/current-region)
+
+result=""
+result=$(aws ecr describe-images --repository-name "$image_repository_name" --image-ids "imageTag=$image_tag" --region "$region" 2> /dev/null) || true
+if [ -n "$result" ]; then
+  echo "true"
+else
+  echo "false"
+fi
diff --git a/bin/lint-markdown b/bin/lint-markdown
new file mode 100755
index 00000000..644465ff
--- /dev/null
+++ b/bin/lint-markdown
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# To make things simpler, ensure we're in the repo's root directory (one directory up) before
+# running, regardless of where the user is when invoking this script.
+
+# Grab the full directory name for where this script lives.
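+# (readlink -f canonicalizes the script path, resolving any symlinks, and
+# xargs dirname then strips the filename to leave the directory.)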
+script_dir=$(readlink -f "$0" | xargs dirname)
+
+# Move up to the root since we want to do everything relative to that. Note that this only impacts
+# this script, but will leave the user wherever they were when the script exits.
+cd "${script_dir}/.." >/dev/null || exit 1
+
+link_check_config=".github/workflows/markdownlint-config.json"
+
+# Recursively find all markdown files (*.md) in the current directory, excluding node_modules and .venv subfolders.
+# Pass them in as args to the lint command using the handy `xargs` command.
+find . -name \*.md -not -path "*/node_modules/*" -not -path "*/.venv/*" -print0 | xargs -0 -n1 npx markdown-link-check --config $link_check_config
diff --git a/bin/publish-release b/bin/publish-release
new file mode 100755
index 00000000..b15e1bc5
--- /dev/null
+++ b/bin/publish-release
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -euo pipefail
+
+app_name=$1
+image_name=$2
+image_tag=$3
+
+echo "---------------"
+echo "Publish release"
+echo "---------------"
+echo "Input parameters:"
+echo "  app_name=$app_name"
+echo "  image_name=$image_name"
+echo "  image_tag=$image_tag"
+
+# Need to init module when running in CD since GitHub actions does a fresh checkout of repo
+terraform -chdir="infra/$app_name/app-config" init > /dev/null
+terraform -chdir="infra/$app_name/app-config" apply -auto-approve > /dev/null
+image_repository_name=$(terraform -chdir="infra/$app_name/app-config" output -raw image_repository_name)
+
+region=$(./bin/current-region)
+read -r image_registry_id image_repository_url <<< "$(aws ecr describe-repositories --repository-names "$image_repository_name" --query "repositories[0].[registryId,repositoryUri]" --output text)"
+image_registry="$image_registry_id.dkr.ecr.$region.amazonaws.com"
+
+echo "Build repository info:"
+echo "  region=$region"
+echo "  image_registry=$image_registry"
+echo "  image_repository_name=$image_repository_name"
+echo "  image_repository_url=$image_repository_url"
+echo
+echo "Authenticating Docker with ECR"
+aws ecr get-login-password --region "$region" \
+  | docker login --username AWS --password-stdin "$image_registry"
+echo
+echo "Check if tag has already been published..."
+result=""
+result=$(aws ecr describe-images --repository-name "$image_repository_name" --image-ids "imageTag=$image_tag" --region "$region" 2> /dev/null) || true
+if [ -n "$result" ]; then
+  echo "Image with tag $image_tag already published"
+  exit 0
+fi
+
+echo "New tag. Publishing image"
+docker tag "$image_name:$image_tag" "$image_repository_url:$image_tag"
+docker push "$image_repository_url:$image_tag"
diff --git a/bin/run-command b/bin/run-command
new file mode 100755
index 00000000..c4259af8
--- /dev/null
+++ b/bin/run-command
@@ -0,0 +1,220 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# Run an application command using the application image
+#
+# Optional parameters:
+#   --environment-variables - a JSON list of environment variables to add to
+#     the container. Each environment variable is an object with the "name" key
+#     specifying the name of the environment variable and the "value" key
+#     specifying the value of the environment variable.
+#     e.g. '[{ "name" : "DB_USER", "value" : "migrator" }]'
+#   --task-role-arn - the IAM role ARN that the task should assume. Overrides
+#     the task role specified in the task definition.
+#
+# Positional parameters:
+#   app_name (required) - the name of subdirectory of /infra that holds the
+#     application's infrastructure code.
+#   environment (required) - the name of the application environment (e.g. dev,
+#     staging, prod)
+#   command (required) - a JSON list representing the command to run
+#     e.g. To run the command `db-migrate-up` with no arguments, set
+#     command='["db-migrate-up"]'
+#     e.g. To run the command `echo "Hello, world"`, set
+#     command='["echo", "Hello, world"]'
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+# Parse optional parameters
+environment_variables=""
+task_role_arn=""
+while :; do
+  case $1 in
+    --environment-variables)
+      environment_variables=$2
+      shift 2
+      ;;
+    --task-role-arn)
+      task_role_arn=$2
+      shift 2
+      ;;
+    *)
+      break
+      ;;
+  esac
+done
+
+app_name="$1"
+environment="$2"
+command="$3"
+
+echo "==============="
+echo "Running command"
+echo "==============="
+echo "Input parameters"
+echo "  app_name=$app_name"
+echo "  environment=$environment"
+echo "  command=$command"
+echo "  environment_variables=${environment_variables:-}"
+echo "  task_role_arn=${task_role_arn:-}"
+echo
+
+# Use the same cluster, task definition, and network configuration that the application service uses
+cluster_name=$(terraform -chdir="infra/$app_name/service" output -raw service_cluster_name)
+service_name=$(terraform -chdir="infra/$app_name/service" output -raw service_name)
+
+# Get the log group and log stream prefix so that we can print out the ECS task's logs after running the task
+log_group=$(terraform -chdir="infra/$app_name/service" output -raw application_log_group)
+log_stream_prefix=$(terraform -chdir="infra/$app_name/service" output -raw application_log_stream_prefix)
+
+service_task_definition_arn=$(aws ecs describe-services --no-cli-pager --cluster "$cluster_name" --services "$service_name" --query "services[0].taskDefinition" --output text)
+# For subsequent commands, use the task definition family rather than the service's task definition ARN
+# because in the case of migrations, we'll deploy a new task definition revision before updating the
+# service, so the service will be using an old revision, but we want to use the latest revision.
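+# (Referencing the family without a revision suffix - e.g. "app" rather than
+# "app:42" - resolves to the latest active revision.)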
+task_definition_family=$(aws ecs describe-task-definition --no-cli-pager --task-definition "$service_task_definition_arn" --query "taskDefinition.family" --output text)
+
+network_config=$(aws ecs describe-services --no-cli-pager --cluster "$cluster_name" --services "$service_name" --query "services[0].networkConfiguration")
+current_region=$(./bin/current-region)
+aws_user_id=$(aws sts get-caller-identity --no-cli-pager --query UserId --output text)
+
+container_name=$(aws ecs describe-task-definition --task-definition "$task_definition_family" --query "taskDefinition.containerDefinitions[0].name" --output text)
+
+overrides=$(cat << EOF
+{
+  "containerOverrides": [
+    {
+      "name": "$container_name",
+      "command": $command
+    }
+  ]
+}
+EOF
+)
+
+if [ -n "$environment_variables" ]; then
+  overrides=$(echo "$overrides" | jq ".containerOverrides[0].environment |= $environment_variables")
+fi
+
+if [ -n "$task_role_arn" ]; then
+  overrides=$(echo "$overrides" | jq ".taskRoleArn |= \"$task_role_arn\"")
+fi
+
+task_start_time=$(date +%s)
+task_start_time_millis=$((task_start_time * 1000))
+
+aws_args=(
+  ecs run-task
+  --region="$current_region"
+  --cluster="$cluster_name"
+  --task-definition="$task_definition_family"
+  --started-by="$aws_user_id"
+  --launch-type=FARGATE
+  --platform-version=1.4.0
+  --network-configuration "$network_config"
+  --overrides "$overrides"
+)
+echo "::group::Running AWS CLI command"
+printf " ... %s\n" "${aws_args[@]}"
+task_arn=$(aws --no-cli-pager "${aws_args[@]}" --query "tasks[0].taskArn" --output text)
+echo "::endgroup::"
+echo
+
+# Get the task id by extracting the substring after the last '/' since the task ARN is of
+# the form "arn:aws:ecs:<region>:<account id>:task/<cluster name>/<task id>"
+ecs_task_id=$(basename "$task_arn")
+
+# The log stream has the format "prefix-name/container-name/ecs-task-id"
+# See https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_awslogs.html
+log_stream="$log_stream_prefix/$container_name/$ecs_task_id"
+
+# Wait for log stream to be created before fetching the logs.
+# The reason we don't use the `aws ecs wait tasks-running` command is because
+# that command can fail on short-lived tasks. In particular, the command polls
+# every 6 seconds with describe-tasks until tasks[].lastStatus is RUNNING. A
+# task that completes quickly can go from PENDING to STOPPED, causing the wait
+# command to error out.
+echo "Waiting for log stream to be created"
+echo "  task_arn=$task_arn"
+echo "  task_id=$ecs_task_id"
+echo "  log_stream=$log_stream"
+
+num_retries_waiting_for_logs=0
+while true; do
+  num_retries_waiting_for_logs=$((num_retries_waiting_for_logs+1))
+  if [ $num_retries_waiting_for_logs -eq 20 ]; then
+    echo "Timing out task $ecs_task_id waiting for logs"
+    exit 1
+  fi
+  is_log_stream_created=$(aws logs describe-log-streams --no-cli-pager --log-group-name "$log_group" --query "length(logStreams[?logStreamName==\`$log_stream\`])")
+  if [ "$is_log_stream_created" == "1" ]; then
+    break
+  fi
+  sleep 5
+  echo -n "."
+done
+echo
+echo
+
+# Tail logs until task stops using a loop that polls for new logs.
+# The reason why we don't use `aws logs tail` is because that command is meant
+# for interactive use. In particular, it will wait forever for new logs, even
+# after a task stops, until the user hits Ctrl+C. And the reason why we don't
+# wait until the task completes first before fetching logs is so that we can
+# show logs in near real-time, which can be useful for long running tasks.
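+# Note: get-log-events returns at most 10,000 events (up to 1 MB) per call;
+# advancing the --start-time filter each iteration keeps every fetch small and
+# avoids re-printing events that were already shown.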
+echo "::group::Tailing logs until task stops" +echo " log_group=$log_group" +echo " log_stream=$log_stream" +echo " task_start_time_millis=$task_start_time_millis" +# Initialize the logs start time filter to the time we started the task +logs_start_time_millis=$task_start_time_millis +while true; do + # Print logs with human readable timestamps by fetching the log events as JSON + # then transforming them afterwards using jq + log_events=$(aws logs get-log-events \ + --no-cli-pager \ + --log-group-name "$log_group" \ + --log-stream-name "$log_stream" \ + --start-time "$logs_start_time_millis" \ + --start-from-head \ + --no-paginate \ + --output json) + # Divide timestamp by 1000 since AWS timestamps are in milliseconds + echo "$log_events" | jq -r '.events[] | ((.timestamp / 1000 | strftime("%Y-%m-%d %H:%M:%S")) + "\t" + .message)' + + # If the task stopped, then stop tailing logs + last_task_status=$(aws ecs describe-tasks --cluster "$cluster_name" --tasks "$task_arn" --query "tasks[0].containers[?name=='$container_name'].lastStatus" --output text) + if [ "$last_task_status" = "STOPPED" ]; then + break + fi + + # If there were new logs printed, then update the logs start time filter + # to be the last log's timestamp + 1 + last_log_timestamp=$(echo "$log_events" | jq -r '.events[-1].timestamp' ) + if [ "$last_log_timestamp" != "null" ]; then + logs_start_time_millis=$((last_log_timestamp + 1)) + fi + + # Give the application a moment to generate more logs before fetching again + sleep 1 +done +echo "::endgroup::" +echo + +container_exit_code=$( + aws ecs describe-tasks \ + --cluster "$cluster_name" \ + --tasks "$task_arn" \ + --query "tasks[0].containers[?name=='$container_name'].exitCode" \ + --output text +) + +if [[ "$container_exit_code" == "null" || "$container_exit_code" != "0" ]]; then + echo "Task failed" >&2 + # Although we could avoid extra calls to AWS CLI if we just got the full JSON response from + # `aws ecs describe-tasks` and parsed it with jq, we are trying to avoid unnecessary dependencies. + container_status=$(aws ecs describe-tasks --cluster "$cluster_name" --tasks "$task_arn" --query "tasks[0].containers[?name=='$container_name'].[lastStatus,exitCode,reason]" --output text) + task_status=$(aws ecs describe-tasks --cluster "$cluster_name" --tasks "$task_arn" --query "tasks[0].[lastStatus,stopCode,stoppedAt,stoppedReason]" --output text) + + echo "Container status (lastStatus, exitCode, reason): $container_status" >&2 + echo "Task status (lastStatus, stopCode, stoppedAt, stoppedReason): $task_status" >&2 + exit 1 +fi diff --git a/bin/run-database-migrations b/bin/run-database-migrations new file mode 100755 index 00000000..7ac49fea --- /dev/null +++ b/bin/run-database-migrations @@ -0,0 +1,62 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Run database migrations +# 1. Update the application's task definition with the latest build, but +# do not update the service +# 2. Run the "db-migrate" command in the container as a new task +# +# Positional parameters: +# app_name (required) โ€“ the name of subdirectory of /infra that holds the +# application's infrastructure code. +# image_tag (required) โ€“ the tag of the latest build +# environment (required) โ€“ the name of the application environment (e.g. 
+#     staging, prod)
+# -----------------------------------------------------------------------------
+
+set -euo pipefail
+
+app_name="$1"
+image_tag="$2"
+environment="$3"
+
+echo "=================="
+echo "Running migrations"
+echo "=================="
+echo "Input parameters"
+echo "  app_name=$app_name"
+echo "  image_tag=$image_tag"
+echo "  environment=$environment"
+echo
+echo "Step 0. Check if app has a database"
+
+terraform -chdir="infra/$app_name/app-config" init > /dev/null
+terraform -chdir="infra/$app_name/app-config" apply -auto-approve > /dev/null
+has_database=$(terraform -chdir="infra/$app_name/app-config" output -raw has_database)
+if [ "$has_database" = "false" ]; then
+  echo "Application does not have a database, no migrations to run"
+  exit 0
+fi
+
+db_migrator_user=$(terraform -chdir="infra/$app_name/app-config" output -json environment_configs | jq -r ".$environment.database_config.migrator_username")
+
+./bin/terraform-init "infra/$app_name/service" "$environment"
+migrator_role_arn=$(terraform -chdir="infra/$app_name/service" output -raw migrator_role_arn)
+
+echo
+echo "::group::Step 1. Update task definition without updating service"
+
+TF_CLI_ARGS_apply="-input=false -auto-approve -target=module.service.aws_ecs_task_definition.app -var=image_tag=$image_tag" make infra-update-app-service APP_NAME="$app_name" ENVIRONMENT="$environment"
+
+echo "::endgroup::"
+echo
+echo 'Step 2. Run "db-migrate" command'
+
+command='["db-migrate"]'
+
+# Indent the later lines more to make the output of run-command prettier
+environment_variables=$(cat << EOF
+[{ "name" : "DB_USER", "value" : "$db_migrator_user" }]
+EOF
+)
+
+./bin/run-command --task-role-arn "$migrator_role_arn" --environment-variables "$environment_variables" "$app_name" "$environment" "$command"
diff --git a/bin/set-up-current-account b/bin/set-up-current-account
new file mode 100755
index 00000000..4fe0bbef
--- /dev/null
+++ b/bin/set-up-current-account
@@ -0,0 +1,110 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# This script sets up the terraform backend for the AWS account that you are
+# currently authenticated into and creates the terraform backend config file.
+#
+# The script takes a human readable account name that is used to prefix the
+# tfbackend file that is created. This is to make it easier to visually identify
+# which tfbackend file corresponds to which AWS account. The account ID is still
+# needed since all AWS accounts are guaranteed to have an account ID, and the
+# account ID cannot change, whereas other things like the AWS account alias
+# can change and are not guaranteed to exist.
+#
+# Positional parameters:
+#   account_name (required) - human readable name for the AWS account that you're
+#     authenticated into. The account name will be used to prefix the created
+#     tfbackend file so that it's easier to visually identify as opposed to
+#     identifying the file using the account id.
+#     For example, if you have an account per environment, the account name can
+#     be the name of the environment (e.g. "prod" or "staging"). Or if you are
+#     setting up an account for all lower environments, the account name can be
+#     "lowers". If your AWS account has an account alias, you can also use that.
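+#
+# Example usage (the account name here is illustrative):
+#   ./bin/set-up-current-account lowers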
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+account_name=$1
+
+account_id=$(./bin/current-account-id)
+region=$(./bin/current-region)
+
+# Get project name
+terraform -chdir="infra/project-config" apply -auto-approve > /dev/null
+project_name=$(terraform -chdir=infra/project-config output -raw project_name)
+
+tf_state_bucket_name="$project_name-$account_id-$region-tf"
+tf_state_key="infra/account.tfstate"
+
+echo "=================="
+echo "Setting up account"
+echo "=================="
+echo "account_name=$account_name"
+echo "account_id=$account_id"
+echo "project_name=$project_name"
+echo "tf_state_bucket_name=$tf_state_bucket_name"
+echo "tf_state_key=$tf_state_key"
+echo "region=$region"
+echo
+echo "------------------------------------------------------------------------------"
+echo "Bootstrapping the account by creating an S3 backend with minimal configuration"
+echo "------------------------------------------------------------------------------"
+echo
+echo "Creating bucket: $tf_state_bucket_name"
+# For creating buckets outside of us-east-1, a LocationConstraint needs to be set
+# For creating buckets in us-east-1, LocationConstraint cannot be set
+# See https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html
+create_bucket_configuration=("")
+if [ "$region" != "us-east-1" ]; then
+  create_bucket_configuration=("--create-bucket-configuration" "LocationConstraint=$region")
+fi
+
+aws s3api create-bucket --bucket "$tf_state_bucket_name" --region "$region" "${create_bucket_configuration[@]}" > /dev/null
+echo
+echo "----------------------------------"
+echo "Creating rest of account resources"
+echo "----------------------------------"
+echo
+
+cd infra/accounts
+
+# Create the OpenID Connect provider for GitHub Actions to allow GitHub Actions
+# to authenticate with AWS and manage AWS resources. We create the OIDC provider
+# via AWS CLI rather than via Terraform because we need to first check if there
+# is already an existing OpenID Connect provider for GitHub Actions. This check
+# is needed since there can only be one OpenID Connect provider per URL per AWS
+# account.
+github_arn=$(aws iam list-open-id-connect-providers | jq -r ".[] | .[] | .Arn" | grep github || echo "")
+
+if [[ -z ${github_arn} ]]; then
+  aws iam create-open-id-connect-provider \
+    --url "https://token.actions.githubusercontent.com" \
+    --client-id-list "sts.amazonaws.com" \
+    --thumbprint-list "0000000000000000000000000000000000000000"
+fi
+
+# Create the infrastructure for the terraform backend such as the S3 bucket
+# for storing tfstate files and the DynamoDB table for tfstate locks.
+# -reconfigure is used in case this isn't the first account being set up
+# and there is already a .terraform directory
+terraform init \
+  -reconfigure \
+  -input=false \
+  -backend-config="bucket=$tf_state_bucket_name" \
+  -backend-config="key=$tf_state_key" \
+  -backend-config="region=$region"
+
+# Import the bucket that we created in the previous step so we don't recreate it.
+# But first check if the bucket already exists in the state file. If we are
+# re-running account setup and the bucket already exists, then skip the import step.
+if ! terraform state list module.backend.aws_s3_bucket.tf_state; then
+  terraform import module.backend.aws_s3_bucket.tf_state "$tf_state_bucket_name"
+fi
+
+terraform apply \
+  -input=false \
+  -auto-approve
+
+cd -
+
+module_dir=infra/accounts
+backend_config_name="$account_name.$account_id"
+./bin/create-tfbackend "$module_dir" "$backend_config_name" "$tf_state_key"
diff --git a/bin/terraform-apply b/bin/terraform-apply
new file mode 100755
index 00000000..27bc3050
--- /dev/null
+++ b/bin/terraform-apply
@@ -0,0 +1,41 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# Convenience script for running terraform apply for the specified module and
+# configuration name. The configuration name is used to determine which .tfvars
+# file to use for the -var-file option of terraform apply.
+#
+# Additional arguments to terraform apply can also be passed in using terraform's
+# built-in environment variables TF_CLI_ARGS and TF_CLI_ARGS_name. For example,
+# in CI/CD pipelines, you may want to set TF_CLI_ARGS="-input=false -auto-approve"
+# to skip the confirmation prompt.
+# See https://developer.hashicorp.com/terraform/cli/config/environment-variables#tf_cli_args-and-tf_cli_args_name
+#
+# Positional parameters:
+#   module_dir (required) - The location of the root module to initialize and apply
+#   config_name (required) - The name of the tfvars config. For accounts, the
+#     config name is the AWS account alias. For application modules, the config
+#     name is the name of the environment (e.g. "dev", "staging", "prod"). For
+#     application modules that are shared across environments, the config name
+#     is "shared".
+#     For example, if a backend config file is named "myaccount.s3.tfbackend",
+#     then the config_name would be "myaccount"
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+module_dir="$1"
+config_name="$2"
+
+# 1. Set working directory to the terraform root module directory
+
+cd "$module_dir"
+
+# 2. Run terraform apply with the tfvars file (if it exists) that has the same
+#    name as the backend config file. Pass -var-file only when the tfvars file
+#    exists: passing an empty string would make terraform treat it as a saved
+#    plan file path.
+
+tf_vars_file="$config_name.tfvars"
+if [ -f "$tf_vars_file" ]; then
+  terraform apply "-var-file=$tf_vars_file"
+else
+  terraform apply
+fi
diff --git a/bin/terraform-init b/bin/terraform-init
new file mode 100755
index 00000000..b0d60f99
--- /dev/null
+++ b/bin/terraform-init
@@ -0,0 +1,27 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# Convenience script for running terraform init for the specified module and
+# configuration name. The configuration name is used to determine which
+# .tfbackend file to use for the -backend-config option of terraform init.
+#
+# Positional parameters:
+#   module_dir (required) - The location of the root module to initialize and apply
+#   config_name (required) - The name of the backend config. For accounts, the
+#     config name is the AWS account alias. For application modules, the config
+#     name is the name of the environment (e.g. "dev", "staging", "prod"). For
+#     application modules that are shared across environments, the config name
+#     is "shared".
+#     For example, if a backend config file is named "myaccount.s3.tfbackend",
+#     then the config_name would be "myaccount"
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+module_dir="$1"
+config_name="$2"
+
+# Run terraform init with the named backend config file
+
+backend_config_file="$config_name.s3.tfbackend"
+
+# Note that the backend_config_file path is relative to module_dir, not the current working directory
+terraform -chdir="$module_dir" init \
+  -input=false \
+  -reconfigure \
+  -backend-config="$backend_config_file"
diff --git a/bin/terraform-init-and-apply b/bin/terraform-init-and-apply
new file mode 100755
index 00000000..94e2102e
--- /dev/null
+++ b/bin/terraform-init-and-apply
@@ -0,0 +1,23 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# Convenience script for running terraform init followed by terraform apply.
+# See ./bin/terraform-init and ./bin/terraform-apply for more details.
+#
+# Positional parameters:
+#   module_dir (required) - The location of the root module to initialize and apply
+#   config_name (required) - The name of the tfbackend and tfvars config. The name
+#     is expected to be consistent for both the tfvars file and the tfbackend file.
+#     For example, if a backend config file is named "myaccount.s3.tfbackend",
+#     then the config_name would be "myaccount"
+# -----------------------------------------------------------------------------
+set -euo pipefail
+
+module_dir="$1"
+config_name="$2"
+
+./bin/terraform-init "$module_dir" "$config_name"
+
+./bin/terraform-apply "$module_dir" "$config_name"
diff --git a/docs/code-reviews.md b/docs/code-reviews.md
new file mode 100644
index 00000000..78748afe
--- /dev/null
+++ b/docs/code-reviews.md
@@ -0,0 +1,55 @@
+# Code Reviews
+
+Code reviews are intended to help all of us grow as engineers and improve the quality of what we ship.
+These guidelines are meant to reinforce those two goals.
+
+## For reviewers
+
+Aim to respond to code reviews within 1 business day.
+
+Remember to highlight things that you like and appreciate while reading through the changes,
+and to make any other feedback clearly actionable by indicating whether it is an optional preference, an important consideration, or an error.
+
+Don't be afraid to comment with a question, ask for clarification, or provide a suggestion
+whenever you don't understand what is going on at first glance, or if you think an approach or decision can be improved.
+Suggestions on how to split a large PR into smaller chunks can also help move things along.
+Code reviews give us a chance to learn from one another, and to reflect, iterate on, and document why certain decisions are made.
+
+Once you're ready to approve or request changes, err on the side of trust.
+Send a vote of approval if the PR looks ready except for minor changes,
+and trust that the recipient will address your comments before merging by replying via comment or code to any asks.
+Use "request changes" sparingly; reserve it for blocking issues or major refactors that should be done before merging.
+
+## For authors or requesters
+
+Your PR should be small enough that a reviewer can reasonably respond within 1-2 business days.
+For larger changes, break them down into a series of PRs.
+If refactors are included in your changes, try to split them out into separate PRs.
+
+As a PR writer, you should consider your description and comments as documentation;
+current and future team members will refer to them to understand your design decisions.
+Include relevant context and business requirements, and add preemptive comments (in code or PR)
+for sections of code that may be confusing or worth debate.
+
+### Draft PRs
+
+If your PR is a work-in-progress, or if you are looking for specific feedback on things,
+create a [Draft Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests#draft-pull-requests)
+and state what you are looking for in the description.
+
+### Re-requesting reviews after completing changes
+
+After you make requested changes in response to code review feedback, please re-request reviews from the reviewers to notify them that the work is ready to be reviewed again.
+
+## Advantages of code review
+
+- catch and prevent bugs
+- keep the codebase consistent
+- surface opportunities to reuse shared code
+- share knowledge
+
+## Challenges of code reviews
+
+- it can take a long time
+- knowing whom to ask for a review
+- knowing when a review is "enough"
+- knowing what to focus on when reviewing
diff --git a/docs/compliance.md b/docs/compliance.md
new file mode 100644
index 00000000..b876f3ae
--- /dev/null
+++ b/docs/compliance.md
@@ -0,0 +1,29 @@
+# Compliance
+
+We use the [Checkov](https://www.checkov.io/) and [tfsec](https://aquasecurity.github.io/tfsec/) static analysis tools to check for compliance with infrastructure policies.
+
+## Setup
+
+To run these tools locally, first install them by running the following commands.
+
+* Install `checkov`
+
+  ```bash
+  brew install checkov
+  ```
+
+* Install `tfsec`
+
+  ```bash
+  brew install tfsec
+  ```
+
+## Check compliance
+
+```bash
+make infra-check-compliance
+```
+
+## Pre-Commit
+
+If you use [pre-commit](https://www.checkov.io/4.Integrations/pre-commit.html), you can optionally add `checkov` to your own pre-commit hook by following the instructions [here](https://www.checkov.io/4.Integrations/pre-commit.html).
diff --git a/docs/decisions/index.md b/docs/decisions/index.md
index 7595587a..0d5118da 100644
--- a/docs/decisions/index.md
+++ b/docs/decisions/index.md
@@ -2,9 +2,20 @@
 This log lists the architectural decisions for [project name].
- 
+ 
-- [ADR-0000](0000-use-markdown-architectural-decision-records.md) - Use Markdown Architectural Decision Records
+* [ADR-0000](infra/0000-use-markdown-architectural-decision-records.md) - Use Markdown Architectural Decision Records
+* [ADR-0001](infra/0001-ci-cd-interface.md) - CI/CD Interface
+* [ADR-0002](infra/0002-use-custom-implementation-of-github-oidc.md) - Use custom implementation of GitHub OIDC to authenticate GitHub actions with AWS rather than using module in Terraform Registry
+* [ADR-0003](infra/0003-manage-ecr-in-prod-account-module.md) - Manage ECR in prod account module
+* [ADR-0004](infra/0004-separate-terraform-backend-configs-into-separate-config-files.md) - Separate tfbackend configs into separate files
+* [ADR-0005](infra/0005-separate-database-infrastructure-into-separate-layer.md) - Separate the database infrastructure into a separate layer
+* [ADR-0006](infra/0006-provision-database-users-with-serverless-function.md) - Provision database users with serverless function
+* [ADR-0007](infra/0007-database-migration-architecture.md) - Database Migration Infrastructure and Deployment
+* [ADR-0008](infra/0008-consolidate-infra-config-from-tfvars-files-into-config-module.md) - Consolidate infra configuration from .tfvars files into config module
+* [ADR-0009](infra/0009-separate-app-infrastructure-into-layers.md) - Separate app infrastructure into layers
+* [ADR-0010](infra/0010-feature-flags-system-design.md) - Feature flags system design
+* [ADR-0011](infra/0011-network-layer-design.md) - Design of network layer
diff --git a/docs/decisions/infra/0000-use-markdown-architectural-decision-records.md b/docs/decisions/infra/0000-use-markdown-architectural-decision-records.md
new file mode 100644
index 00000000..c82b8cd1
--- /dev/null
+++ b/docs/decisions/infra/0000-use-markdown-architectural-decision-records.md
@@ -0,0 +1,26 @@
+# Use Markdown Architectural Decision Records
+
+## Context and Problem Statement
+
+We want to record architectural decisions made in this project.
+Which format and structure should these records follow?
+
+## Considered Options
+
+* [MADR](https://adr.github.io/madr/) 2.1.2 - The Markdown Architectural Decision Records
+* [Michael Nygard's template](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions) - The first incarnation of the term "ADR"
+* [Sustainable Architectural Decisions](https://www.infoq.com/articles/sustainable-architectural-design-decisions) - The Y-Statements
+* Other templates listed at <https://github.com/joelparkerhenderson/architecture_decision_record>
+* Formless - No conventions for file format and structure
+
+## Decision Outcome
+
+Chosen option: "MADR 2.1.2", because
+
+* Implicit assumptions should be made explicit.
+  Design documentation is important to enable people to understand the decisions later on.
+  See also [A rational design process: How and why to fake it](https://doi.org/10.1109/TSE.1986.6312940).
+* The MADR format is lean and fits our development style.
+* The MADR structure is comprehensible and facilitates usage & maintenance.
+* The MADR project is actively maintained.
+* Version 2.1.2 is the latest one available when starting to document ADRs.
diff --git a/docs/decisions/infra/0001-ci-cd-interface.md b/docs/decisions/infra/0001-ci-cd-interface.md
new file mode 100644
index 00000000..13aa6559
--- /dev/null
+++ b/docs/decisions/infra/0001-ci-cd-interface.md
@@ -0,0 +1,113 @@
+# CI/CD Interface
+
+* Status: accepted
+* Deciders: @lorenyu @kyeah
+* Date: 2022-10-04
+
+Technical Story: Define Makefile interface between infra and application [#105](https://github.com/navapbc/template-infra/issues/105)
+
+## Context and Problem Statement
+
+In order to reuse CI and CD logic for different tech stacks, we need to establish a consistent interface by which different applications can hook into the common CI/CD infrastructure.
+
+## Decision Drivers
+
+* We want to define most of the release management logic in `template-infra` but allow application-specific methods for building the release.
+* The build needs to be able to be run from the CD workflow defined in `template-infra`, but it also needs to be able to be run from the application as part of the CI workflow as one of the CI checks.
+
+## Proposal
+
+### CD interface
+
+Create a `Makefile` in the `template-infra` repo that defines the following make targets:
+
+```makefile
+##################
+# Building and deploying
+##################
+
+release-build: # assumes there is a Dockerfile in `app` folder
+	... code that builds image from app/Dockerfile
+
+release-publish:
+	... code that publishes to ecr
+
+release-deploy:
+	... code that restarts ecs service with new image
+```
+
+Each of the template applications (template-application-nextjs, template-application-flask) needs to have a `Makefile` in `app/` e.g. `template-application-flask/app/Makefile` with a `release-build` target that builds the release image. The `release-build` target should take an `OPTS` argument to pass into the build command to allow the parent Makefile to pass in arguments like `--tag IMAGE_NAME:IMAGE_TAG` which can facilitate release management.
+
+```makefile
+# template-application-flask/app/Makefile
+
+release-build:
+	docker build $(OPTS) --target release .
+```
+
+By convention, the application's Dockerfile should have a named stage called `release` e.g.
+
+```Dockerfile
+# template-application-flask/app/Dockerfile
+...
+FROM scratch AS release
+...
+```
+
+### CI interface
+
+Each application will have its own CI workflow that gets copied into the project's workflows folder as part of installation. `template-application-nextjs` and `template-application-flask` will have `.github/workflows/ci-app.yml`, and `template-infra` will have `.github/workflows/ci-infra.yml`.
+
+Installation would look something like:
+
+```bash
+cp template-infra/.github/workflows/* .github/workflows/
+cp template-application-nextjs/.github/workflows/* .github/workflows/
+```
+
+CI in `template-application-nextjs` might be something like:
+
+```yml
+# template-application-nextjs/.github/workflows/ci-app.yml
+
+jobs:
+  lint:
+    steps:
+      - run: npm run lint
+  type-check:
+    steps:
+      - run: npm run type-check
+  test:
+    steps:
+      - run: npm test
+```
+
+CI in `template-application-flask` might be something like:
+
+```yml
+# template-application-flask/.github/workflows/ci-app.yml
+
+jobs:
+  lint:
+    steps:
+      - run: poetry run black
+  type-check:
+    steps:
+      - run: poetry run mypy
+  test:
+    steps:
+      - run: poetry run pytest
+```
+
+For now, we are assuming there's only one deployable application service per repo, but we could evolve this architecture to have the project rename `app` as part of the installation process to something specific like `api` or `web`, and rename `ci-app.yml` appropriately to `ci-api.yml` or `ci-web.yml`, which would allow for multiple application folders to co-exist.
+
+## Alternative options considered for CD interface
+
+1. Application template repos also have their own release-build command (could use Make, but doesn't have to) that is called as part of the application's ci-app.yml. The application's version of release-build doesn't have to tag the release, since the template-infra version will do that:
+
+   * Cons: the build command lives in two places, and while 99% of the build logic is within the Dockerfile and code, there's still a small chance that differences in build command line arguments could produce a different build in CI than what is used for release
+
+2. We can run release-build as part of template-infra's ci-infra.yml, so we still get CI test coverage of the build process
+
+   * Cons: things like tests and linting in ci-app.yml can't use the docker image to run the tests, which potentially means CI and production are using slightly different environments
diff --git a/docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md b/docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md
new file mode 100644
index 00000000..ebec1f86
--- /dev/null
+++ b/docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md
@@ -0,0 +1,38 @@
+# Use custom implementation of GitHub OIDC to authenticate GitHub actions with AWS rather than using module in Terraform Registry
+
+* Status: accepted
+* Deciders: @shawnvanderjagt @lorenyu @NavaTim
+* Date: 2022-10-05 (Updated 2023-07-12)
+
+## Context and Problem Statement
+
+[GitHub recommends using OpenID Connect to authenticate GitHub actions with AWS](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect). There are [existing modules in the Terraform Registry](https://registry.terraform.io/search/modules?q=github%20actions%20oidc) that implement these resources. Should we use an existing module or implement our own?
+
+## Decision Drivers
+
+* Secure
+* Maintainable
+* Simple and easily understood
+
+## Considered Options
+
+* Use [unfunco/oidc-github](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) module from Terraform registry
+* Use a fork of [unfunco/oidc-github](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) in [NavaPBC GitHub org](https://github.com/navapbc)
+* Use a custom implementation
+
+## Decision Outcome
+
+We chose to use a custom implementation because it allowed for the simplest implementation that was easiest to understand while still being in our full control, and therefore avoids security issues with external dependencies. It is also easy to upgrade to use the external module if circumstances change.
+
+## Pros and Cons of the Options
+
+The [unfunco/oidc-github](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) module from the Terraform registry is effectively what we need, but there are a few disadvantages to using it:
+
+Cons of unfunco/oidc-github:
+
+* Dependency on an external module in the Terraform registry has negative security implications. Furthermore, the module isn't published by an "official" organization. It is maintained by a single developer, further increasing the security risk.
+* The module includes extra unnecessary options that make the code more difficult to read and understand.
+* In particular, the module includes the option to attach the `AdministratorAccess` policy to the GitHub actions IAM role, which isn't necessary and could raise concerns in an audit.
+* ~~The module hardcodes the GitHub OIDC Provider thumbprint, which isn't as elegant as the method in the [Initial setup for CD draft PR #43](https://github.com/navapbc/template-infra/pull/43) from @shawnvanderjagt which simply pulls the thumbprint via:~~ (Update: July 12, 2023) Starting July 6, 2023, AWS began securing communication with GitHub's OIDC identity provider (IdP) using GitHub's library of trusted root Certificate Authorities instead of using a certificate thumbprint to verify the IdP's server certificate. This approach ensures that the GitHub OIDC configuration behaves correctly without disruption during future certificate rotations and changes. With this new validation approach in place, legacy thumbprints are no longer needed for validation purposes.
+
+Forking the module to the @navapbc organization gets rid of the security issue, but the other issues remain.
diff --git a/docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md b/docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md
new file mode 100644
index 00000000..fc2e7556
--- /dev/null
+++ b/docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md
@@ -0,0 +1,33 @@
+# Manage ECR in prod account module
+
+* Status: accepted
+* Deciders: @lorenyu @shawnvanderjagt @farrcraft @kyeah
+* Date: 2022-10-07
+
+## Context and Problem Statement
+
+In a multi-account setup where there is one account per environment, where should the ECR repository live?
+
+## Decision Drivers
+
+* Minimize the risk that the approach isn't acceptable to the agency, given uncertainty around the ability to provision accounts with the agency
+* Desire an approach that can adapt equally well to a multi-account setup (with an account per environment) as well as to a single-account setup (with one account across all environments) or a two-account setup (with one account for prod and an account for non-prod)
+* Desire an approach that can adapt to situations where there is more than one ECR repository, i.e. a project with multiple deployable applications
+* Simplicity
+
+## Considered Options
+
+* Separate `dist`/`build` account to contain the ECR repository and build artifacts
+* Manage the ECR repository as part of the `prod` account
+* Manage the ECR repository as part of the `dev` or `stage` account
+
+## Decision Outcome
+
+Manage the ECR repository(ies) as part of the prod account module, or for single-account setups, the single account module. Since there will always be a prod account, this approach should have minimal risk of not working for the agency, and it will also work for projects that only have or need a single account.
+
+## Discussion of Alternative Approach
+
+However, if account management and creation were not an issue, it could be more elegant to have the production candidate build artifacts be managed in a separate `build` account that all environment accounts reference. This approach is described in the following references:
+
+* [Medium article: Cross-Account Amazon Elastic Container Registry (ECR) Access for ECS](https://garystafford.medium.com/amazon-elastic-container-registry-ecr-cross-account-access-for-ecs-2f90fcb02c80)
+* [AWS whitepaper - Recommended Accounts - Deployments](https://docs.aws.amazon.com/whitepapers/latest/organizing-your-aws-environment/deployments-ou.html)
diff --git a/docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md b/docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md
new file mode 100644
index 00000000..18b5284f
--- /dev/null
+++ b/docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md
@@ -0,0 +1,36 @@
+# Separate tfbackend configs into separate files
+
+* Status: accepted
+* Deciders: @lorenyu @shawnvanderjagt @kyeah @bneutra
+* Date: 2023-05-09
+
+## Context
+
+Up until now, most projects adopted an infrastructure module architecture that is structured as follows: Each application environment (prod, staging, etc.) is a separate root module that calls a template module. The template module defines all the application infra resources needed for an environment. Things that could differ per environment (e.g. desired ECS task count) are template variables, and each environment can have local vars (or, somewhat equivalently, a tfvars file) that customize those variables. Importantly, each environment has its own backend tfstate file, and the backend config is stored in the environment module's `main.tf`.
+
+An alternative approach exists for managing the backend configs. Rather than saving the backend config directly in `main.tf`, `main.tf` could contain a [partial configuration](https://developer.hashicorp.com/terraform/language/settings/backends/configuration#partial-configuration), and the rest of the backend config would be passed in during terraform init with a command like `terraform init --backend-config=prod.s3.tfbackend`.
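+As a rough sketch (the module path and file names here are illustrative), working with an environment would then look like:
+
+```bash
+# Hypothetical invocations for a "prod" environment of an application module
+terraform -chdir=infra/app/service init -backend-config=prod.s3.tfbackend
+terraform -chdir=infra/app/service apply
+```
+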
+There would no longer be a need for separate root modules for each environment. What was previously the template module would instead act as the root module, and engineers would work with different environments solely through separate tfbackend files and tfvars files. Doing this would greatly simplify the module architecture at the cost of some complexity when executing terraform commands due to the extra command line parameters. To manage that extra complexity, a wrapper script (such as a set of Makefile commands) can be introduced.
+
+The approach can be further extended to per-environment variable configuration via an analogous mechanism, [variable definitions files](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files), which can be passed to terraform commands with the `-var-file` command line option.
+
+## Notes
+
+For creating accounts, we can't use the .tfbackend backend config file approach because the main.tf file can only have one backend configuration: if we define the backend as a partial configuration of `backend "s3" {}`, then we can't use that same module to configure a new account, since the process for configuring a new account requires setting the backend configuration to `backend "local" {}`. We could keep a separate, duplicate module with the backend set to `local`, or we could temporarily update the backend from `"s3"` to `"local"`, but both of those approaches seem confusing.
+
+Another alternative is to go back to the old way of bootstrapping an account, i.e. via a script that creates an S3 bucket with the AWS CLI. The bootstrap script would only do the minimal configuration for the S3 bucket and let Terraform handle the remainder of the configuration, such as creating the DynamoDB tables. At that point, there is no risk of not having state locking in place since the account infrastructure has not yet been checked into the repository. This might be the cleanest way to have accounts follow the same pattern of using tfbackend config files.
+
+## Benefits of separate tfvars and tfbackend files
+
+* **Reduce risk of differences between environments** – When different environments have their own root modules, development teams have historically sometimes added one-off resources to specific environments without adding those resources to the template module, and without realizing that they're violating an important goal of having multiple environments – that environments are isolated from each other but function identically. This creates differences between environments that are more than just configuration differences. Forcing the differences into the `.tfvars` file limits how far an environment can drift out of sync.
+* **DRY backend configuration** – With only a single module, there is less duplication of infrastructure code in the `main.tf` file. In particular, provider configurations, the shared partial backend configuration, and certain other top-level local variables and data resources no longer need to be duplicated across environments, and provider versions can also be forced to be consistent.
+* **Make receiving updates from template-infra more robust** – Previously, in order for a project to receive updates from the template-infra repo, the project would copy over template files but then revert files that the project has changed.
Currently, the many `main.tf` root module files in the template are expected to be changed by the project since they define project-specific backend configurations. With the separation of config files, projects are no longer expected to change the `main.tf` files, so the `main.tf` files in `infra/app/build-repository/`, `infra/project-config/`, `infra/app/app-config/`, etc. can be safely copied over from template-infra without needing to be reverted.
+* **Reduce the cost of introducing additional infrastructure layers** – In the future, we may want to add new infrastructure layers that are created and updated independently of the application layer. Examples include a network layer or a database layer. We may want to keep them separate so that changes to the application infrastructure are isolated from changes to the database infrastructure, which should occur much less frequently. Previously, to add a new layer such as the database layer, we would need two additional folders: a `db-env-template` module and a `db-envs` folder with separate root modules for each environment, mirroring the structure that we have for the application. With separate backend config and tfvars files, we would only need a single `db` module with separate `.tfbackend` and `.tfvars` files for each environment.
+
+## Cons of separate tfvars and tfbackend files
+
+* **Extra layer of abstraction** – The modules themselves aren't as simple to understand since the configuration is spread across multiple files: the `main.tf` file and the corresponding `.tfvars` and `.tfbackend` files, rather than everything in one `main.tf` file.
+* **Requires additional parameters when running terraform** – Because the configuration is separated into `.tfvars` and `.tfbackend` files, terraform commands now require the `-var-file` and `-backend-config` command line options. The added complexity may require a wrapper script, introducing yet another layer of abstraction.
+
+## Links
+
+* Refined by [ADR-0008](./0008-consolidate-infra-config-from-tfvars-files-into-config-module.md)
diff --git a/docs/decisions/infra/0005-separate-database-infrastructure-into-separate-layer.md b/docs/decisions/infra/0005-separate-database-infrastructure-into-separate-layer.md
new file mode 100644
index 00000000..98629b65
--- /dev/null
+++ b/docs/decisions/infra/0005-separate-database-infrastructure-into-separate-layer.md
@@ -0,0 +1,66 @@
+# Separate the database infrastructure into a separate layer
+
+* Status: proposed
+* Deciders: @lorenyu @kyeah @shawnvanderjagt @rocketnova
+* Date: 2023-05-25
+
+## Context and Problem Statement
+
+On many projects, setting up the application and database is a multi-step, iterative process. The infrastructure team will first set up an application service without a database, with a simple application health check. The infrastructure team will then work on setting up the database: configuring the application service to have network access to the database cluster, configuring a database user that the application will authenticate as and a database user that will run migrations, and providing a way for the application to authenticate. Then the application team will update the health check to call the database.
+
+We want to design the template infrastructure so that each infrastructure layer can be configured and created once rather than needing to revisit prior layers.
In other words, we'd like to be able to create the database layer, configure the database users, and then create the application layer, without having to go back and make changes to the database layer again.
+
+There are some dependencies to keep in mind:
+
+1. The creation of the application service layer depends on the creation of the database layer, since a proper application health check will need to hit the database.
+2. The database layer includes the creation and configuration of the database users (i.e. PostgreSQL users) that will be used by the application and migration processes, in addition to the database cluster infrastructure resources.
+3. The network rule that allows inbound traffic to the database from the application depends on both the database and the application service.
+
+## Decision Drivers
+
+* Avoid circular dependencies
+* Avoid the need to revisit a layer (e.g. database layer, application layer) more than once during the setup of the application environment
+* Keep things simple to understand and customize
+* Minimize the number of steps to set up an environment
+
+## Module Architecture Options
+
+* Option A: Put the database infrastructure in the same root module as the application service
+* Option B: Separate the database infrastructure into a separate layer
+
+### Decision Outcome: Separate the database infrastructure into a separate layer
+
+Changes to database infrastructure are infrequent, so they do not need to be incorporated into the continuous delivery process that deploys the application; doing so would needlessly slow down application deploys and increase the risk of accidental changes to the database layer. When database changes are needed, they are sometimes complex due to the stateful nature of databases and can require multiple steps to make gracefully. For these changes, it is beneficial to separate them from application resources so that application deploys remain unaffected. Finally, breaking the environment setup process into smaller, more linear steps – creating the database before creating the application service – makes it easier to understand and troubleshoot than doing everything at once.
+
+The biggest disadvantage of this approach is ~~the fact that dependencies between root modules cannot be directly expressed in terraform. To mitigate this problem, we should carefully design the interface between root modules to minimize breaking changes in that interface.~~ (Update: 2023-07-07) that dependencies between root modules become more indirect and difficult to express. See [module dependencies](/docs/infra/module-dependencies.md).
+
+## Pros and Cons of the Options
+
+### Option A: Put the database infrastructure in the same root module as the application service
+
+Pros:
+
+* This is what we've typically done in the past. All the infrastructure necessary for the application environment would live in a single root module, with the exception of shared resources like the ECR image repository.
+
+Cons:
+
+* The application service's health check depends on the database cluster being created and the database user being provisioned. This cannot easily be done in a single `terraform apply`.
+* Changes to the database infrastructure are often more complex than changes to application infrastructure.
Unlike changes to application infrastructure, database changes cannot take the approach of spinning up new infrastructure in the desired configuration, redirecting traffic to the new infrastructure, and then destroying the old infrastructure. This is because application infrastructure can be designed to be stateless while databases are inherently stateful. In such cases, making database changes may require careful coordination and may block changes to the application infrastructure, potentially including blocking deploys, while the database changes are made.
+
+### Option B: Separate the database infrastructure into a separate layer
+
+Pros:
+
+* Separating the database layer makes the dependency between the database and the application service explicit, and enables an environment setup process that only creates resources once all of their dependencies have been created.
+* Application deploys do not require making requests to the database infrastructure.
+* Complex database changes that require multiple steps can be made without negatively impacting application deploys.
+* Not all applications require a database. Having the database layer separate reduces the amount of customization needed at the application layer for different systems.
+
+Cons:
+
+* Application resources for a single environment are split across multiple root modules.
+* Dependencies between root modules cannot be expressed directly in Terraform, so Terraform's built-in dependency graph cannot be used. ~~Instead, dependencies between root modules need to be configured from one module's outputs to another module's variable definitions file~~ (Update: 2023-07-07) Instead, dependencies between root modules need to leverage Terraform data sources to reference resources across root modules, and need to use a shared config module to reference the parameters that uniquely identify the resource. See [module dependencies](/docs/infra/module-dependencies.md).
+
+## Links
+
+* Refined by [ADR-0009](./0009-separate-app-infrastructure-into-layers.md)
diff --git a/docs/decisions/infra/0006-provision-database-users-with-serverless-function.md b/docs/decisions/infra/0006-provision-database-users-with-serverless-function.md
new file mode 100644
index 00000000..9810d829
--- /dev/null
+++ b/docs/decisions/infra/0006-provision-database-users-with-serverless-function.md
@@ -0,0 +1,87 @@
+# Provision database users with serverless function
+
+* Status: proposed
+* Deciders: @lorenyu @kyeah @shawnvanderjagt @rocketnova
+* Date: 2023-05-25
+
+## Context and Problem Statement
+
+What is the best method for setting up database users and permissions for the application service and the migrations task?
+
+## Decision Drivers
+
+* Minimize the number of steps
+* Security and compliance
+
+## Considered Options
+
+* **Terraform** – Define users and permissions declaratively in Terraform using the [PostgreSQL provider](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs). Apply changes from the infrastructure engineer's local machine or from the CI/CD workflow. When initially creating the database cluster, make it publicly accessible and define security group rules to allow traffic from the local machine or GitHub Actions. After creating the database users, reconfigure the database cluster to make it private.
+* **Shell scripts** – Define users and permissions through a shell script, optionally using tools like `psql`. The script could also define the permissions in a `.sql` file that gets executed.
Similar to the Terraform option, the database would need to be made accessible to the machine running the script. One way to do this is for the script itself to temporarily enable access to the database using the AWS CLI.
+* **Jump host using EC2** – Run Terraform or a shell script from an EC2 instance within the VPC. Create the EC2 instance and set up network connectivity between the EC2 instance and the database cluster as part of creating the database infrastructure resources.
+* **Container task using ECS** – Build a Docker image that has the code and logic to provision users and permissions, and run the code as an ECS task.
+* **Serverless function using Lambda** – Write code to provision database users and permissions and run it as a Lambda function.
+
+### Decision Outcome: AWS Lambda function
+
+A Lambda function is the simplest tool that can operate within the VPC and therefore get around the obstacle of needing network access to the database cluster. EC2 instances are too expensive to maintain for rarely used operations like database user provisioning, and ECS tasks add complexity to the infrastructure by requiring an additional ECR image repository and image build step.
+
+## Pros and Cons of the Options
+
+### Terraform
+
+Pros
+
+* Declarative
+* Could create the database cluster and database users in a single `terraform apply`
+
+Cons
+
+* The database needs to be publicly accessible to the machine that is running the script – either the infrastructure engineer's local machine or the continuous integration service (e.g. GitHub Actions). First, this causes the database setup process to take a minimum of three steps: (1) create the database cluster with a publicly accessible configuration, (2) provision the database users, (3) make the database cluster private. Second, even if it is an acceptable risk to make the database publicly accessible when it is first created and before it has any data, it may not be an acceptable risk to do so once the system is in production. Therefore, after the system is in production, there would no longer be a way to reconfigure the database users or otherwise maintain the system using this approach.
+* Need to modify the database cluster configuration after creating it in order to make it private. Modifications require an additional step, and may also require manual changes to the Terraform configuration.
+
+### Shell scripts
+
+Pros
+
+* Simple
+* Can represent user configuration as a `.sql` script, which could simplify database management by keeping it all within SQL
+
+Cons
+
+* Same as the cons for Terraform – the database needs to be accessible to the machine running the script
+
+### Jump host using EC2
+
+Pros
+
+* Can leverage the Terraform and shell script approaches
+* Can access the database securely from within the VPC without making the database cluster publicly accessible
+
+Cons
+
+* Added infrastructure complexity due to the need to maintain an EC2 instance
+
+### Container task using ECS
+
+Pros
+
+* Flexible: can build everything needed in a Docker container, including installing necessary binaries and bundling required libraries and code
+* Can access the database securely from within the VPC without making the database cluster publicly accessible
+
+Cons
+
+* Increases the complexity of the Terraform module architecture. There needs to be an ECR repository to store the Docker images. The ECR repository could be in a separate root module, which adds another layer to the module architecture.
The ECR repository could be put in the `build-repository` root module, which would clutter the `build-repository` since it's not related to application builds. Or it could be put in the `database` root module and be manually created using Terraform's `-target` flag, but that adds complexity to the setup process.
+* Increases the number of steps needed to set up the database by at least two: one to create the ECR repository and one to build and publish the Docker image to it, both before creating the database cluster resources.
+
+### Serverless function using Lambda
+
+Pros
+
+* Flexible: can build many things in a Lambda function
+* Can access the database securely from within the VPC without making the database cluster publicly accessible
+* Relatively simple
+
+Cons
+
+* Adds a new dependency to the application setup process. The setup process will now rely on the programming language used by the Lambda function (Python in this case).
+* Can't easily use custom external binaries in AWS Lambda, so the function will rely mostly on code rather than lower-level tools like `psql`.
diff --git a/docs/decisions/infra/0007-database-migration-architecture.md b/docs/decisions/infra/0007-database-migration-architecture.md
new file mode 100644
index 00000000..e55d9785
--- /dev/null
+++ b/docs/decisions/infra/0007-database-migration-architecture.md
@@ -0,0 +1,92 @@
+# Database Migration Infrastructure and Deployment
+
+- Status: proposed
+- Deciders: @lorenyu, @daphnegold, @chouinar, @Nava-JoshLong, @addywolf-nava, @sawyerh, @acouch, @SammySteiner
+- Date: 2023-06-05
+
+## Context and Problem Statement
+
+What is the optimal setup for database migration infrastructure and deployment? This ADR breaks down the different options for how the migration is run, but not the tools or languages the migration will be run with, which will depend on the framework the application is using.
+
+Questions that need to be addressed:
+
+1. How will the method get the latest migration code to run?
+2. What infrastructure is required to use this method?
+3. How is the migration deployment re-run in case of errors?
+
+## Decision Drivers
+
+- Security
+- Simplicity
+- Flexibility
+
+## Considered Options
+
+- Run migrations from GitHub Actions
+- Run migrations from a Lambda function
+- Run migrations from an ECS task
+- Run migrations from self-hosted GitHub Actions runners
+
+## Decision Outcome
+
+Run migrations from an ECS task using the same container image that is used for running the web service. Require a `db-migrate` script in the application container image that performs the migration. When running the migration task using [AWS CLI's run-task command](https://docs.aws.amazon.com/cli/latest/reference/ecs/run-task.html), use the `--overrides` option to override the command to the `db-migrate` command (see the sketch after the options discussion below).
+
+Default to rolling forward instead of rolling back when issues arise (see [Pitfalls with SQL rollbacks and automated database deployments](https://octopus.com/blog/database-rollbacks-pitfalls)). Do not support rolling back out of the box, but still allow project teams to easily implement database rollbacks by running an application-specific database rollback script through a general-purpose `run-command` script.
+
+Pros:
+
+- No changes to the database network configuration are needed. The database can remain inaccessible from the public internet.
+- Database migrations are agnostic to the migration framework that the application uses, as long as the application provides a `db-migrate` script that is accessible from the container's PATH and is able to use IAM authentication for connecting to the database. Applications can use [alembic](https://alembic.sqlalchemy.org/), [flyway](https://flywaydb.org/), [prisma](https://www.prisma.io/), another migration framework, or custom-built migrations.
+- Database migrations use the same application image and task definition as the base application.
+
+Cons:
+
+- Running migrations requires doing a [targeted terraform apply](https://developer.hashicorp.com/terraform/tutorials/state/resource-targeting) to update the task definition without updating the service. Terraform recommends against targeting individual resources as part of a normal workflow. However, this is done to ensure migrations are run before the service is updated.
+
+## Other options considered
+
+### Run migrations from GitHub Actions using a direct database connection
+
+Temporarily update the database to be accessible from the internet and allow incoming network traffic from the GitHub Action runner's IP address. Then run the migrations directly from the GitHub Action runner. At the end, revert the database configuration changes.
+
+Pros:
+
+- Simple. Requires no additional infrastructure.
+
+Cons:
+
+- This method requires temporarily exposing the database to incoming connections from the internet, which may not comply with agency security policies.
+
+### Run migrations from a Lambda function
+
+Run migrations from an AWS Lambda function that uses the application's container image. The application container image needs to [implement the Lambda runtime API](https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/) either by using an AWS base image for Lambda or by implementing the Lambda runtime (see [Working with Lambda container images](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html)).
+
+Pros:
+
+- Relatively simple. Lambdas are already used for managing database roles.
+- The Lambda function can run from within the VPC, avoiding the need to expose the database to the public internet.
+- The Lambda function is separate from the application service, so we avoid the need to modify the service's task definition.
+
+Cons:
+
+- Lambda function container images need to [implement the Lambda runtime API](https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/). This is a complex application requirement that would significantly limit the ease of use of the infrastructure.
+- Lambda functions have a maximum runtime of 15 minutes, which can limit certain kinds of migrations.
+
+### Run migrations from self-hosted GitHub Actions runners
+
+Run the migrations directly from a [self-hosted GitHub Action runner](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners). Configure the runner to have network access to the database.
+
+Pros:
+
+- If a project already uses self-hosted runners, this can be the simplest option, as it provides all the benefits of running migrations directly from GitHub Actions without the security impact.
+
+Cons:
+
+- The main downside is that this requires maintaining self-hosted GitHub Action runners, which is too costly to implement and maintain for projects that don't already have them set up.
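+
+For concreteness, the chosen ECS-based approach boils down to an invocation along the following lines. This is a sketch: the cluster, task definition, container name, and network values are illustrative, not the template's actual configuration.
+
+```bash
+aws ecs run-task \
+  --cluster myapp-dev \
+  --task-definition myapp-dev \
+  --launch-type FARGATE \
+  --network-configuration "awsvpcConfiguration={subnets=[subnet-0abc123],securityGroups=[sg-0abc123]}" \
+  --overrides '{"containerOverrides": [{"name": "myapp", "command": ["db-migrate"]}]}'
+```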
+
+## Related ADRs
+
+- [Separate the database infrastructure into a separate layer](./0005-separate-database-infrastructure-into-separate-layer.md)
- [Provision database users with serverless function](./0006-provision-database-users-with-serverless-function.md)
diff --git a/docs/decisions/infra/0008-consolidate-infra-config-from-tfvars-files-into-config-module.md b/docs/decisions/infra/0008-consolidate-infra-config-from-tfvars-files-into-config-module.md
new file mode 100644
index 00000000..3174021d
--- /dev/null
+++ b/docs/decisions/infra/0008-consolidate-infra-config-from-tfvars-files-into-config-module.md
@@ -0,0 +1,30 @@
+# Consolidate infra configuration from .tfvars files into config module
+
+* Status: accepted
+* Deciders: @lorenyu @rocketnova @kyeah @acouch
+* Date: 2023-09-07
+
+Technical Story: [Replace configure scripts with project/app config variables #312](https://github.com/navapbc/template-infra/issues/312)
+
+## Context
+
+Currently, application infrastructure configuration is split across config modules (see [app-config](/infra/app/app-config/)) as well as .tfvars files in each of the application's infra layers: `infra/app/build-repository`, `infra/app/database`, and `infra/app/service`. @kyeah pointed out that it's easy to make mistakes when configuration is spread across multiple files, and expressed a desire to manage tfvars across environments in a single file, the way some applications do for application configuration. Also, as @acouch [pointed out](https://github.com/navapbc/template-infra/pull/282#discussion_r1219930653), there is a lot of duplicate code in the configure scripts (setup-current-account.sh, configure-app-build-repository.sh, configure-app-database.sh, configure-app-service.sh) that configure the backend config and variable files for each infrastructure layer, which increases the burden of maintaining them.
+
+## Overview
+
+This ADR proposes the following:
+
+* Move all environment configuration into [app-config](/infra/app/app-config/) modules
+* Remove the need for .tfvars files
+* Remove the configuration scripts that are currently used for configuring each infrastructure layer
+
+Benefits:
+
+* All configuration can now be managed in the [app-config](/infra/app/app-config/) module.
+* All dependencies between root modules can be managed explicitly via the [app-config](/infra/app/app-config/) module.
+* Custom configuration scripts no longer need to be maintained.
+* Eliminates the need to specify the `-var-file` option when running `terraform apply`, which reduces the need for terraform wrapper scripts.
+
+## Links
+
+* Builds on [ADR-0004](./0004-separate-terraform-backend-configs-into-separate-config-files.md)
diff --git a/docs/decisions/infra/0009-separate-app-infrastructure-into-layers.md b/docs/decisions/infra/0009-separate-app-infrastructure-into-layers.md
new file mode 100644
index 00000000..7d624738
--- /dev/null
+++ b/docs/decisions/infra/0009-separate-app-infrastructure-into-layers.md
@@ -0,0 +1,40 @@
+# Separate app infrastructure into layers
+
+* Status: accepted
+* Deciders: @lorenyu @rocketnova @jamesbursa
+* Date: 2023-09-11
+
+Technical Story: [Document rationale for splitting up infra layers across multiple root modules](https://github.com/navapbc/template-infra/issues/431)
+
+## Context and Problem Statement
+
+This document builds on the database module design [ADR: Separate the database infrastructure into a separate layer](./0005-separate-database-infrastructure-into-separate-layer.md) to describe the general rationale for separating application environment infrastructure into separate root modules that are managed in separate Terraform state files and updated separately, rather than all in a single root module. It restates and summarizes the rationale from the previous ADR and includes additional motivating examples.
+
+## Overview
+
+Based on the factors in the section below, the infrastructure has been grouped into the following separate layers:
+
+* Account layer
+* Network layer
+* Build repository layer
+* Database layer
+* Service layer
+
+### Factors
+
+* **Variations in number and types of environments in each layer:** Not all layers of infrastructure have the same concept of "environment" as the application layer. The AWS account layer might have one account for all applications, two accounts (one for non-production environments and one for the production environment), or one account per environment. The network (VPC) layer can have similar variations (one VPC for non-prod and one for prod, or one per environment). The build repository layer only has one instance that's shared across all environments. And the database layer may or may not even be needed, depending on the application. Putting all resources in one root module would disallow these variations between layers unless you introduce special-case logic that behaves differently based on the environment (e.g. logic like "if environment = prod then create build-repository"), which increases complexity and reduces consistency between environments.
+
+* **AWS uniqueness constraints on resources:** This is a special case of the previous bullet, but some resources have uniqueness constraints in AWS. For example, there can only be one OIDC provider for GitHub Actions per AWS account (see [Creating OIDC identity providers](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html)). As another example, there can only be one VPC endpoint per VPC per AWS service (see [Fix conflicting DNS domain errors for interface VPC endpoints](https://repost.aws/knowledge-center/vpc-interface-endpoint-domain-conflict)). Therefore, if multiple application environments share a VPC, they can't each create a VPC endpoint for the same AWS service.
As such, the VPC endpoint logically belongs to the network layer, and VPC endpoints should be created and managed per network environment rather than per application environment.
+
+* **Policy constraints on what resources the project team is authorized to manage:** Some projects have restrictions on who can create or modify certain categories of resources. For example, on some projects, VPCs have to be created by a central cloud operations team upon request and provided to the project team. Separating the infrastructure into modular layers allows project teams to manage downstream layers like the database and service even if upstream layers are managed externally. In our example, if the VPC layer is provided by another department, the project team can skip the network layer, or modify the network layer to build upon the externally provided VPC, without needing to refactor the rest of the infrastructure.
+
+* **Out-of-band dependencies:** Some infrastructure resources depend on steps that take place outside of AWS and Terraform in order to complete their creation, which makes it infeasible to rely on Terraform's built-in resource dependency graph to manage the creation of downstream resources. For example, creating an SSL/TLS certificate relies on an external step to verify ownership of the domain before the certificate can be used by a downstream load balancer. As another example, after creating a database cluster, the database schemas, roles, and privileges need to be configured before they can be used by a downstream service. Separating infrastructure layers allows upstream dependencies to be fully created before attempting to create downstream dependent resources.
+
+* **Mitigate risk of accidental changes:** Some layers, such as the network and database layers, aren't expected to change frequently, whereas the service layer is expected to change on every deploy in order to update the image tag in the task definition. Separating the layers reduces the risk of accidentally making changes to one layer when applying changes to another.
+
+* **Speed of Terraform plans:** The more resources are managed in a Terraform state file, the more network calls Terraform needs to make to AWS in order to fetch the current state of the infrastructure, which causes Terraform plans to take more time. Separating out resources that rarely need to change improves the efficiency of making infrastructure changes.
+
+## Links
+
+* Based on [ADR-0005](./0005-separate-database-infrastructure-into-separate-layer.md)
+* [Module architecture](/docs/infra/module-architecture.md)
diff --git a/docs/decisions/infra/0010-feature-flags-system-design.md b/docs/decisions/infra/0010-feature-flags-system-design.md
new file mode 100644
index 00000000..092cac70
--- /dev/null
+++ b/docs/decisions/infra/0010-feature-flags-system-design.md
@@ -0,0 +1,124 @@
+# Feature flags system design
+
+* Deciders: @aligg @Nava-JoshLong @lorenyu
+* Date: 2023-11-28
+
+## Context
+
+All projects should have some sort of feature flag mechanism for controlling the release and activation of features. This accelerates product development by allowing developers to deploy continuously while still providing business owners with control over when features are visible to end users.
More advanced feature flag systems can also provide the ability to do gradual rollouts to increasing percentages of end users and to run split tests (also known as A/B tests) to evaluate the impact of different feature variations on user behavior and outcomes, which provides greater flexibility in how to reduce the risk of launching features. As an example, when working on a project to migrate off of legacy systems, the ability to slowly throttle traffic to the new system while monitoring for issues in production is critical to managing risk.
+
+## Requirements
+
+1. The project team can define feature flags, or feature toggles, that enable or disable a set of functionality in an environment.
+2. The feature flagging system should support gradual rollouts: the ability to roll out a feature incrementally to a percentage of users.
+3. Separate feature flag configuration from the implementation of the feature flags, so that feature flags can be changed frequently through configuration without touching the underlying feature flag infrastructure code.
+
+## Approach
+
+This tech spec explores the use of [AWS CloudWatch Evidently](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently.html), a service that provides functionality for feature flags, gradual rollouts, and conducting split testing (A/B testing) experiments.
+
+## Feature management
+
+One key design question is how features should be managed once defined. How should a team go about enabling and disabling feature flags and adjusting the percentage of traffic to send to a new feature during a feature launch?
+
+### Option 1. Manage features using app-config module as part of service layer
+
+Define features in [app-config](/infra/app/app-config/), and use that configuration in the [service layer](/infra/app/service/) to create and configure the features in AWS Evidently.
+
+* Everything is defined in code and in one place.
+* Features and feature configurations are updated automatically as part of service deploys, or manually via a `terraform apply`.
+
+The configuration in the app-config module might look something like the following:
+
+```terraform
+features = {
+  some_disabled_feature = {} // defaults to enabled = false
+
+  some_enabled_feature = {
+    enabled = true
+  }
+
+  partially_rolled_out_feature = {
+    throttle_percentage = 0.2
+  }
+}
+```
+
+### Option 2. Manage features using app-config as part of a separate infrastructure layer
+
+Define features in [app-config](/infra/app/app-config/main.tf). Create the features in the [service layer](/infra/app/service/), but set things like throttle percentages (for gradual rollouts) in a separate infrastructure layer.
+
+* Allows for separation of permissions. For example, individuals can have permission to update feature launch throttle percentages without having permission to create, edit, or delete the features themselves.
+
+### Option 3. Manage features in AWS Console outside of Terraform
+
+Define features in [app-config](/infra/app/app-config/main.tf) and create them in the [service layer](/infra/app/service), but set things like throttle percentages (for gradual rollouts) outside of Terraform (e.g. via the AWS Console). Use `lifecycle { ignore_changes = [entity_overrides] }` in the Terraform configuration for the `aws_evidently_feature` resources to ignore settings that are managed via the AWS Console.
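+
+For illustration, such a resource might look roughly like the following. This is a sketch; the exact attribute names and structure may differ from the template's actual feature-flags module.
+
+```terraform
+resource "aws_evidently_feature" "feature_flag" {
+  for_each = toset(local.feature_flags)
+
+  name    = each.key
+  project = aws_evidently_project.feature_flags.name
+
+  variations {
+    name = "FeatureOff"
+    value {
+      bool_value = false
+    }
+  }
+
+  variations {
+    name = "FeatureOn"
+    value {
+      bool_value = true
+    }
+  }
+
+  # Launch settings managed via the AWS Console should not be
+  # reverted by subsequent terraform applies
+  lifecycle {
+    ignore_changes = [entity_overrides]
+  }
+}
+```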
+
+Benefits of this option:
+
+* Empowers non-technical roles like business owners and product managers to enable and disable feature flags and adjust feature launch throttle percentages without needing to depend on the development team.
+* A no-code approach using the AWS Console GUI makes it possible to leverage the full set of functionality offered by AWS CloudWatch Evidently, including things like scheduled launches, with minimal training and without needing to learn how to do it in code.
+
+A reduced configuration in the app-config module that just defines the features might look something like the following:
+
+```terraform
+feature_flags = [
+  "some_new_feature_1", "some_new_feature_2"
+]
+```
+
+## Decision Outcome
+
+Chosen option: "Option 3: Manage features in AWS Console outside of Terraform". The ability to empower business and product roles to control launches and experiments without depending on the engineering team maximizes autonomy and allows for the fastest delivery.
+
+## Notes on application layer design
+
+The scope of this tech spec is focused on the infrastructure layer, but we'll include some notes on the elements of feature flag management that will need to be handled at the application layer.
+
+### Application layer requirements
+
+1. Client interface with feature flag service — Applications need a client module that captures the feature flag service abstraction. The application code will interface with this module rather than directly with the underlying feature flag service.
+2. Local development — Project team developers need a way to create and manage feature flags while developing locally, ideally without dependencies on an external service.
+
+### Application layer design
+
+#### Feature flag module interface
+
+At its core, the feature flag module needs a function `isFeatureEnabled` that determines whether a feature has been enabled. It needs to accept a feature name, and for gradual rollouts, it will also need a user identifier. This is so that the system can remember which variation was assigned to a given user, so that any individual user has a consistent experience. (Since the cloud implementation calls a remote service, the function is asynchronous.)
+
+```ts
+interface FeatureFlagService {
+  isFeatureEnabled(featureName: string, userId?: string): Promise<boolean>
+}
+```
+
+#### Adapter pattern
+
+The feature flag module should use the adapter pattern to provide different mechanisms for managing feature flags depending on the environment. Deployed cloud environments should use the Amazon CloudWatch Evidently service. Local development environments could use a mechanism available locally, such as environment variables, config files, cookies, or a combination.
+
+```ts
+import { EvidentlyClient, EvaluateFeatureCommand } from "@aws-sdk/client-evidently";
+
+class EvidentlyFeatureFlagService implements FeatureFlagService {
+  client: EvidentlyClient
+  project: string // name of the Evidently project; assumed to be injected
+
+  constructor(client: EvidentlyClient, project: string) {
+    this.client = client
+    this.project = project
+  }
+
+  async isFeatureEnabled(feature: string, userId?: string): Promise<boolean> {
+    const command = new EvaluateFeatureCommand({
+      project: this.project,
+      feature,
+      // EvaluateFeature requires an entity id; fall back to a shared id
+      // for anonymous users (an assumption for this sketch)
+      entityId: userId ?? "anonymous",
+    });
+    const response = await this.client.send(command)
+    // features are defined with boolean variations
+    return response.value?.boolValue === true
+ } +} +``` + +```ts +class LocalFeatureFlagService implements FeatureFlagService { + + isFeatureEnabled(feature: string, userId?: string): boolean { + // check config files, environment variables, and/or cookies + } +} +``` diff --git a/docs/decisions/infra/0011-network-layer-design.md b/docs/decisions/infra/0011-network-layer-design.md new file mode 100644 index 00000000..863e5b64 --- /dev/null +++ b/docs/decisions/infra/0011-network-layer-design.md @@ -0,0 +1,135 @@ +# Design of network layer + +* Deciders: @lorenyu @shawnvanderjagt +* Date: 2023-12-01 + +## Context and Problem Statement + +Most projects will need to deploy their applications into custom VPCs. The policies around VPCs can vary. For example, some projects might require each application environment to be in its own VPC, while other projects might have all lower environments share a VPC. Some projects might have all resources live in one AWS account, while others might isolate resources into separate accounts. Some projects might have permission to create and configure the VPCs (according to agency security policies), while other projects might have the VPC created by the agency's shared infrastructure team before it's provided to the project team to use. This technical specification proposes a design of the network layer that accommodates these various configurations in a simple modular manner. + +## Requirements + +1. The project team can create any number of networks, or VPCs, independently of the number of AWS accounts or the number of applications or application environments. +2. Created VPCs can be mapped arbitrarily to AWS accounts. They can all be created in a single AWS account or separated across multiple AWS accounts. +3. An application environment can map to any of the created VPCs, or to a VPC that is created outside of the project. + +We aim to achieve these requirements without adding complexity to the other layers. The network layer should be decoupled from the other layers. + +## Approach + +### Network configuration + +Define the configuration for networks in a new property `network_configs` in the [project-config module](/infra/project-config/main.tf). `network_configs` is a map from the network name to the network configuration. The network name is a name the project team chooses to serve as a human-readable identifier to reference the network. To keep network configuration DRY and reuse common configurations between networks, create a sub-module `network-config` under the project-config module, analogous to the [env-config module](/infra/app/app-config/env-config/) under the [app-config module](/infra/app/app-config/). The `project-config` module might look something like this: + +```terraform +# project-config/main.tf + +network_configs = { + dev = module.dev_network_config + ... + [NETWORK_NAME] = module.[NETWORK_NAME]_network_config +} + +# project-config/dev-network.tf + +module "dev_network_config" { + source "./network-config" + ... +} + +... + +# project-config/[NETWORK_NAME]-network.tf + +module "[NETWORK_NAME]_network_config" { + source "./network-config" + ... +} +``` + +Each network config will have the following properties: + +* **account_name** โ€” Name of the AWS account that the VPC should be created in. 
Used to document which AWS account the network lives in and to determine which AWS account to authenticate into when modifying the network from scripts, such as in CI/CD.
+* Each network will have three subnets: (1) a public subnet, (2) a private subnet for the application layer, and (3) a private subnet for the data layer.
+* The network will also have different properties depending on the applications that are using the network (see [Application-specific network configuration](#application-specific-network-configuration)).
+
+### Add network_name tag to VPC
+
+Add a "network_name" tag to the VPC with the name of the network. The VPC tag will be used by the service layer to identify the VPC in an [aws_vpc data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc). Tags are used because, at this time, AWS VPCs do not have any user-provided identifiers such as a VPC name. Generated identifiers like `vpc_id` cannot be used because `vpc_id` is not known statically at configuration time, and we are following the pattern of [using configuration and data sources to manage dependencies between different infrastructure layers](/docs/infra/module-dependencies.md#use-config-modules-and-data-resources-to-manage-dependencies-between-root-modules).
+
+## Application-specific network configuration
+
+In order to determine which VPC to use for each application environment, add a `network_name` property to the [environment config](/infra/app/app-config/env-config/). The network name will be used in the database and service layers by the `aws_vpc` data source:
+
+```terraform
+data "aws_vpc" "network" {
+  tags = {
+    network_name = local.environment_config.network_name
+  }
+}
+```
+
+Networks associated with applications via the `network_name` property will have the following properties based on the application configuration:
+
+1. The `has_database` setting determines whether or not to create VPC endpoints needed by the database layer.
+2. The `has_external_non_aws_service` setting determines whether or not to create NAT gateways, which allow the service in the private subnet to make requests to the internet.
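+
+As a sketch, wiring an environment to a network might look like the following in the environment config (the module structure and property values here are illustrative):
+
+```terraform
+# infra/app/app-config/dev.tf (sketch)
+
+module "dev_config" {
+  source = "./env-config"
+
+  environment  = "dev"
+  network_name = "dev" # must match a key in the project config's network_configs
+
+  has_database                 = true
+  has_external_non_aws_service = false
+}
+```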
+ +### Example configurations + +Example project with a multi-account setup + +```mermaid +graph RL; + subgraph accounts + dev_account[dev] + staging_account[staging] + prod_account[prod] + end + + subgraph networks + dev_network[dev] + staging_network[staging] + prod_network[prod] + end + + subgraph environments + dev_environment[dev] + staging_environment[staging] + prod_environment[prod] + end + + dev_network --> dev_account + staging_network --> staging_account + prod_network --> prod_account + + dev_environment --> dev_network + staging_environment --> staging_network + prod_environment --> prod_network +``` + +Example project with a single account and a shared VPC "lowers" for lower environments + +```mermaid +graph RL; + subgraph accounts + shared_account[shared] + end + + subgraph networks + lowers_network[lowers] + prod_network[prod] + end + + subgraph environments + dev_environment[dev] + staging_environment[staging] + prod_environment[prod] + end + + lowers_network --> shared_account + prod_network --> shared_account + + dev_environment --> lowers_network + staging_environment --> lowers_network + prod_environment --> prod_network +``` diff --git a/docs/decisions/template.md b/docs/decisions/template.md index 25696bbe..fd9d1a8b 100644 --- a/docs/decisions/template.md +++ b/docs/decisions/template.md @@ -8,7 +8,7 @@ Technical Story: [description | ticket/issue URL] ## Context and Problem Statement -[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.] +[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in the form of a question.] ## Decision Drivers diff --git a/docs/feature-flags.md b/docs/feature-flags.md new file mode 100644 index 00000000..4c053b74 --- /dev/null +++ b/docs/feature-flags.md @@ -0,0 +1,31 @@ +# Feature flags and partial releases + +Feature flags are an important tool that enables [trunk-based development](https://trunkbaseddevelopment.com/). They allow in-progress features to be merged into the main branch while still allowing that branch to be deployed to production at any time, thus decoupling application deploys from feature releases. For a deeper introduction, [Martin Fowler's article on Feature Toggles](https://martinfowler.com/articles/feature-toggles.html) and [LaunchDarkly's blog post on feature flags](https://launchdarkly.com/blog/what-are-feature-flags/) are both great articles that explain the what and why of feature flags. + +## How it works + +This project leverages [Amazon CloudWatch Evidently](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently.html) to create and manage feature flags. + +## Creating feature flags + +The list of feature flags for an application is defined in the `feature_flags` property in its app-config module (in `/infra/[app_name]/app-config/feature-flags.tf`). To create a new feature flag, add a new string to that list. To remove a feature flag, remove the feature flag from the list. The set of feature flags will be updated on the next `terraform apply` of the service layer, or during the next deploy of the application. + +## Querying feature flags in the application + +To determine whether a particular feature should be enabled or disabled for a given user, the application code calls an "is feature enabled" function in the feature flags module. 
Under the hood, the module will call AWS Evidently's [EvaluateFeature](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html) API to determine whether a feature is enabled or disabled. For partial rollouts, it will remember which variation of the application a particular user saw and keep the user experience consistent for that user. For more information about the feature flags module, look in the application code and docs.
+
+## Managing feature releases and partial rollouts via AWS Console
+
+The system is designed to allow management of feature releases and partial rollouts outside of Terraform, which empowers business owners and product managers to enable and disable feature flags and adjust feature launch traffic percentages without needing to depend on the development team.
+
+### To enable or disable a feature
+
+1. Navigate to the Evidently service in the AWS Console, select the appropriate Evidently feature flags project for the relevant application environment, and select the feature you want to manage.
+2. In the actions menu, select "Edit feature".
+3. Under "Feature variations", select either "FeatureOn" (to enable a feature) or "FeatureOff" (to disable a feature) to be the "Default" variation, then submit. **Warning: Do not modify the variation values. "FeatureOn" should always have a value of "True" and "FeatureOff" should always have a value of "False".**
+
+### To manage a partial rollout
+
+1. Navigate to the Evidently service in the AWS Console, and select the appropriate Evidently feature flags project for the relevant application environment.
+2. Select "Create launch" to create a new partial rollout plan, or select an existing launch to manage an existing rollout.
+3. Under "Launch configuration", choose the traffic percentage you want to send to each variation, and choose whether you want the launch to begin immediately or on a schedule.
diff --git a/docs/infra/background-jobs.md b/docs/infra/background-jobs.md
new file mode 100644
index 00000000..9455948d
--- /dev/null
+++ b/docs/infra/background-jobs.md
@@ -0,0 +1,16 @@
+# Background jobs
+
+The application may have background jobs that support it. Types of background jobs include:
+
+* Jobs that occur on a fixed schedule (e.g. every hour or every night) — This type of job is useful for ETL jobs that can't be event-driven, such as jobs that ingest source files from an SFTP server or from an S3 bucket managed by another team that we have little control or influence over. **This functionality has not yet been implemented.**
+* Jobs that trigger on an event (e.g. when a file is uploaded to the document storage service). This type of job can be processed by two types of tasks:
+  * Tasks that spin up on demand to process the job — This type of task is appropriate for low-frequency ETL jobs. **This is currently the only type that's supported.**
+  * Worker tasks that run continuously, waiting for jobs to enter a queue that the worker then processes — This type of task is ideal for high-frequency, low-latency jobs such as processing user uploads or submitting claims to an unreliable or high-latency legacy system. **This functionality has not yet been implemented.**
+
+## Job configuration
+
+Background jobs for the application are configured via the application's `env-config` module. The current infrastructure supports jobs that spin up on-demand tasks when a file is uploaded to the document storage service.
These are configured in the `file_upload_jobs` configuration.
+
+## How it works
+
+File upload jobs use AWS EventBridge to listen to "Object Created" events when files are uploaded to S3. An event rule is created for each job configuration, and each event rule has a single event target that targets the application's ECS cluster. The task uses the same container image that the service uses, and the task's configuration is the same as the service's configuration, with the exception of the entry point, which is specified by the job configuration's `task_command` setting. That command can reference the bucket and path of the file that triggered the event by using the template values `<bucket_name>` and `<object_key>`.
diff --git a/docs/infra/cloud-access-control.md b/docs/infra/cloud-access-control.md
new file mode 100644
index 00000000..c90a9f92
--- /dev/null
+++ b/docs/infra/cloud-access-control.md
@@ -0,0 +1,7 @@
+# Cloud Access Control
+
+GitHub Actions needs permissions to create, modify, and destroy resources in the AWS account as part of the CI/CD workflows. The permissions GitHub Actions has are determined by the IAM role and policy defined in the account layer.
+
+## Adding/removing permissions from GitHub Actions
+
+To add or remove permissions from the CI/CD role, update the list of AWS services that GitHub Actions has access to, defined in the project-config module in [project-config/aws-services.tf](/infra/project-config/aws-services.tf).
diff --git a/docs/infra/database-access-control.md b/docs/infra/database-access-control.md
new file mode 100644
index 00000000..5b645f93
--- /dev/null
+++ b/docs/infra/database-access-control.md
@@ -0,0 +1,24 @@
+# Database Access Control
+
+## Manage `postgres` master user password with AWS Secrets Manager
+
+The master user password is managed by Amazon RDS and Secrets Manager. Managing RDS master user passwords with Secrets Manager provides the following security benefits:
+
+* RDS rotates database credentials regularly, without requiring application changes.
+* Secrets Manager secures database credentials from human access and plain-text view. The master password is not even in the Terraform state file.
+
+For more information about the benefits, see [Benefits of managing master user passwords with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html#rds-secrets-manager-benefits).
+
+## Database roles and permissions
+
+The database roles are created by the master user `postgres` when the Role Manager Lambda function runs. The following roles are created:
+
+* **migrator** — The `migrator` role is the role the database migration task assumes. Database migrations are run as part of the deploy workflow before the new container image is deployed to the service. The `migrator` role has permissions to create tables in the `app` schema.
+* **app** — The `app` role is the role the application service assumes. The `app` role has read/write permissions in the `app` schema.
+
+## Database connections
+
+The database authenticates connections with [IAM database authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) (except when connecting as the `postgres` master user).
The security benefits of this approach include:
+
+* The system leverages IAM to centrally manage access to the database.
+* There are no long-lived user database credentials that need to be stored, since database authentication tokens are generated by IAM and have a lifetime of 15 minutes.
diff --git a/docs/infra/destroy-infrastructure.md b/docs/infra/destroy-infrastructure.md
new file mode 100644
index 00000000..0635e330
--- /dev/null
+++ b/docs/infra/destroy-infrastructure.md
@@ -0,0 +1,59 @@
+# Destroy infrastructure
+
+To destroy everything, you'll need to undeploy all the infrastructure in the reverse order of creation. In particular, the account root module(s) need to be destroyed last.
+
+## Instructions
+
+1. First, destroy all your environments. Within `/infra/app/service`, run the following, replacing `dev` with the environment you're destroying.
+
+   ```bash
+   $ terraform init --backend-config=dev.s3.tfbackend
+   $ terraform destroy -var-file=dev.tfvars
+   ```
+
+2. To destroy the backends, first add `force_destroy = true` to the S3 buckets and update the lifecycle block to set `prevent_destroy = false`. Then run `terraform apply` from within the `infra/accounts` directory. This is necessary because S3 buckets are protected from destruction by default to avoid loss of data. See [Terraform: Destroy/Replace Buckets](https://medium.com/interleap/terraform-destroy-replace-buckets-cf9d63d0029d) for a more in-depth explanation.
+
+   ```terraform
+   # infra/modules/terraform-backend-s3/main.tf
+
+   resource "aws_s3_bucket" "tf_state" {
+     bucket = var.state_bucket_name
+
+     force_destroy = true
+
+     # Prevents accidental destruction by a developer executing terraform
+     # destroy in the wrong directory. Contains Terraform state files.
+     lifecycle {
+       prevent_destroy = false
+     }
+   }
+
+   ...
+
+   resource "aws_s3_bucket" "tf_log" {
+     bucket        = var.tf_logging_bucket_name
+     force_destroy = true
+   }
+   ```
+
+3. Since the tfstate buckets are about to be destroyed, move the tfstate file out of S3 and back to your local system. Comment out or delete the S3 backend configuration:
+
+   ```terraform
+   # infra/accounts/main.tf
+
+   # Comment out or delete the backend block
+   backend "s3" {
+     ...
+   }
+   ```
+
+4. Run the following from within the `infra/accounts` directory to copy the `tfstate` back to a local `tfstate` file:
+
+   ```bash
+   terraform init -force-copy
+   ```
+
+5. Finally, run `terraform destroy` within the `infra/accounts` directory.
+
+   ```bash
+   terraform destroy
+   ```
diff --git a/docs/infra/environment-variables-and-secrets.md b/docs/infra/environment-variables-and-secrets.md
new file mode 100644
index 00000000..e468b95f
--- /dev/null
+++ b/docs/infra/environment-variables-and-secrets.md
@@ -0,0 +1,62 @@
+# Environment variables and secrets
+
+Applications follow [12-factor app](https://12factor.net/) principles to [store configuration as environment variables](https://12factor.net/config). The infrastructure provides some of these environment variables automatically, such as environment variables to authenticate as the ECS task role, environment variables for database access, and environment variables for accessing document storage. However, many applications require extra custom environment variables for application configuration and for access to secrets. This document describes how to configure application-specific environment variables and secrets.
It also describes how to override those environment variables for a specific environment. + +## Application-specific extra environment variables + +Applications may need application-specific configuration as environment variables. Examples include things like `WORKER_THREADS_COUNT`, `LOG_LEVEL`, `DB_CONNECTION_POOL_SIZE`, or `SERVER_TIMEOUT`. This section describes how to define extra environment variables for your application that are then made accessible to the ECS task by defining the environment variables in the task definition (see AWS docs on [using task definition parameters to pass environment variables to a container](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html)). + +> ⚠️ Note: Do not put sensitive information such as credentials in regular environment variables. The method described in this section will embed the environment variables and their values in the ECS task definition's container definitions, so anyone with access to view the task definition will be able to see the values of the environment variables. For configuring secrets, see the section below on [Secrets](#secrets). + +Environment variables are defined in the `app-config` module in the [environment-variables.tf file](/infra/app/app-config/env-config/environment-variables.tf). Modify the `default_extra_environment_variables` map to define extra environment variables specific to the application. Map keys define the environment variable name, and values define the default value for the variable across application environments. For example: + +```terraform +# environment-variables.tf + +locals { + default_extra_environment_variables = { + WORKER_THREADS_COUNT = 4 + LOG_LEVEL = "info" + } +} +``` + +To override the default values for a particular environment, modify the `app-config/[environment].tf` file for the environment, and pass overrides to the `env-config` module using the `service_override_extra_environment_variables` variable. For example: + +```terraform +# dev.tf + +module "dev_config" { + source = "./env-config" + service_override_extra_environment_variables = { + WORKER_THREADS_COUNT = 1 + LOG_LEVEL = "debug" + } + ... +} +``` + +## Secrets + +Secrets are a specific category of environment variables that need to be handled sensitively. Examples of secrets are authentication credentials such as API keys for external services. Secrets first need to be stored in AWS SSM Parameter Store as a `SecureString`. This section then describes how to make those secrets accessible to the ECS task as environment variables through the `secrets` configuration in the container definition (see AWS documentation on [retrieving Secrets Manager secrets through environment variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/secrets-envvar-secrets-manager.html)). + +Secrets are defined in the same file in which non-sensitive environment variables are defined, in the `app-config` module in the [environment-variables.tf file](/infra/app/app-config/env-config/environment-variables.tf). Modify the `secrets` map to define the secrets that the application will have access to. For each secret, the map key defines the environment variable name. The `manage_method` property, which can be set to `"generated"` or `"manual"`, defines whether to generate a random secret or reference an existing secret that was manually created and stored in AWS SSM. The `secret_store_name` property defines the SSM parameter name that stores the secret value.
If `manage_method = "generated"`, then `secret_store_name` is where terraform will store the secret. If `manage_method = "manual"`, then `secret_store_name` is where terraform will look for the existing secret. For example: + +```terraform +# environment-variables.tf + +locals { + secrets = { + GENERATED_SECRET = { + manage_method = "generated" + secret_store_name = "/${var.app_name}-${var.environment}/generated-secret" + } + MANUALLY_CREATED_SECRET = { + manage_method = "manual" + secret_store_name = "/${var.app_name}-${var.environment}/manually-created-secret" + } + } +} +``` + +> ⚠️ For secrets with `manage_method = "manual"`, make sure you store the secret in SSM Parameter Store *before* you try to configure your application service with the secrets, or else the service won't be able to start since the ECS task execution role won't be able to fetch the configured secret. diff --git a/docs/infra/https-support.md b/docs/infra/https-support.md new file mode 100644 index 00000000..aee72d41 --- /dev/null +++ b/docs/infra/https-support.md @@ -0,0 +1,40 @@ +# HTTPS support + +Production systems will want to use HTTPS rather than HTTP to prevent man-in-the-middle attacks. This document describes how HTTPS is configured. This process will: + +1. Issue an SSL/TLS certificate using AWS Certificate Manager (ACM) for each domain on which we want to support HTTPS +2. Associate the certificate with the application's load balancer so that the load balancer can serve HTTPS requests intended for that domain + +## Requirements + +In order to set up HTTPS support, you'll also need to have [set up custom domains](/docs/infra/set-up-custom-domains.md). This is because SSL/TLS certificates must be properly configured for the specific domain to support establishing secure connections. + +## 1. Set desired certificates in domain configuration + +For each custom domain you want to set up in the network, define a certificate configuration object and set the `source` to `issued`. You'll probably want at least one custom domain for each application/service in the network. The custom domain must be either the same as the hosted zone or a subdomain of the hosted zone. + +## 2. Update the network layer to issue the certificates + +Run the following command to issue SSL/TLS certificates for each custom domain you configured: + +```bash +make infra-update-network NETWORK_NAME=<NETWORK_NAME> +``` + +Run the following command to check the status of a certificate (replace `<CERTIFICATE_ARN>` using the output from the previous command): + +```bash +aws acm describe-certificate --certificate-arn <CERTIFICATE_ARN> --query Certificate.Status +``` + +## 3. Update `enable_https = true` in `app-config` + +Update `enable_https = true` in your application's `app-config` module. You should have already set `domain_name` as part of [setting up custom domain names](/docs/infra/set-up-custom-domains.md). + +## 4. Attach certificate to load balancer + +Run the following command to attach the SSL/TLS certificate to the load balancer: + +```bash +make infra-update-app-service APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT> +``` diff --git a/docs/infra/intro-to-terraform-workspaces.md b/docs/infra/intro-to-terraform-workspaces.md new file mode 100644 index 00000000..0a4a88d4 --- /dev/null +++ b/docs/infra/intro-to-terraform-workspaces.md @@ -0,0 +1,59 @@ +# Workspaces + +Terraform workspaces are created by default; the default workspace is named "default." Workspaces are used to allow multiple engineers to deploy their own stacks for development and testing.
This allows multiple engineers to develop new features in parallel using a single environment without destroying each other's infrastructure. Separate resources will be created for each engineer when using the prefix variable. + +## Terraform workspace commands + +`terraform workspace show` - This command shows the workspace you are working in. + +`terraform workspace list` - This command lists all workspaces. + +`terraform workspace new [Name]` - This command creates a new workspace. + +`terraform workspace select [Name]` - This command switches to the workspace you select. + +`terraform workspace delete [Name]` - This command deletes the specified workspace. (This does not delete the infrastructure; destroy the infrastructure first.) + +## Workspaces and prefix - A How-To + +Workspaces are used to allow multiple developers to deploy their own stacks for development and testing. By default, `prefix` is set to `terraform.workspace` in the envs/dev environment; it is `staging` and `prod` in those respective environments. + +### envs/dev/main.tf + +``` tf +locals { + prefix = terraform.workspace +} + +module "example" { + source = "../../modules/example" + prefix = local.prefix +} + +``` + +### modules/example/variables.tf - When creating a new module, create the variable `prefix` in your variables.tf + +``` tf + +variable "prefix" { + type = string + description = "prefix used to uniquely identify resources, allows parallel development" + +} + +``` + +### modules/example/main.tf - Use `var.prefix` to uniquely name resources for parallel development + +``` tf + +# Create the S3 bucket with a unique prefix from terraform.workspace. +resource "aws_s3_bucket" "example" { + bucket = "${var.prefix}-bucket" + +} + +``` + +When in the workspace "shawn", the resulting bucket name created in the AWS account will be `shawn-bucket`. This prevents the following undesirable situation: if resources are not actively prefixed and two developers deploy the same resource, the developer who runs their deployment second will overwrite the deployment of the first. diff --git a/docs/infra/intro-to-terraform.md b/docs/infra/intro-to-terraform.md new file mode 100644 index 00000000..f91eddfe --- /dev/null +++ b/docs/infra/intro-to-terraform.md @@ -0,0 +1,33 @@ +# Introduction to Terraform + +## Basic Terraform Commands + +The `terraform init` command is used to initialize a working directory containing Terraform configuration files. This is the first command that should be run after writing a new Terraform configuration or cloning an existing one from version control. + +The `terraform plan` command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. By default, when Terraform creates a plan it: + +- Reads the current state of any already existing remote objects to make sure that the Terraform state is up-to-date. +- Compares the current configuration to the prior state, noting any differences. +- Proposes a set of change actions that should, if applied, make the remote objects match the configuration. + +The `terraform apply` command executes the actions proposed in a Terraform plan, deploying the infrastructure specified in the configuration. Use with caution. A configuration is idempotent once a subsequent apply returns 0 changes. + +The `terraform destroy` command is a convenient way to destroy all remote objects managed by a particular Terraform configuration.
Use `terraform plan -destroy` to preview what remote objects will be destroyed if you run `terraform destroy`. + +⚠️ WARNING! ⚠️ This is a destructive command! As a best practice, it's recommended that you comment out resources in non-development environments rather than running this command. `terraform destroy` should only be used as a way to clean up a development environment, e.g., a developer's workspace after they are done with it. + +For more information about Terraform commands, follow the link below: + +- [Basic CLI Features](https://www.terraform.io/cli/commands) + +## Terraform Dependency Lock File + +The [dependency lock file](https://www.terraform.io/language/files/dependency-lock) tracks provider dependencies. It belongs to the configuration as a whole and is created when running `terraform init`. The lock file is always named `.terraform.lock.hcl`, and this name is intended to signify that it is a lock file for various items that Terraform caches in the `.terraform` subdirectory of your working directory. You should include this file in your version control repository so that you can discuss potential changes to your external dependencies via code review, just as you would discuss potential changes to your configuration itself. + +## Modules + +A module is a container for multiple resources that are used together. Modules can be used to create lightweight abstractions, so that you can describe your infrastructure in terms of its architecture, rather than directly in terms of physical objects. The `.tf` files in your working directory when you run `terraform plan` or `terraform apply` together form the root module. In this root module, you will call modules that you create from the module directory to build the infrastructure required to provide any functionality needed for the application. + +## Terraform Workspaces + +Workspaces are used to allow multiple engineers to deploy their own stacks for development and testing. Read more about them in [Terraform Workspaces](./intro-to-terraform-workspaces.md). diff --git a/docs/infra/making-infra-changes.md b/docs/infra/making-infra-changes.md new file mode 100644 index 00000000..001366de --- /dev/null +++ b/docs/infra/making-infra-changes.md @@ -0,0 +1,56 @@ +# Making and applying infrastructure changes + +## Requirements + +First read [Module Architecture](./module-architecture.md) to understand how the terraform modules are structured. + +## Using make targets (recommended) + +For most changes, you can use the Make targets provided in the root-level Makefile; they can all be run from the project root.
+ +Make changes to the account: + +```bash +make infra-update-current-account +``` + +Make changes to the application service in the dev environment: + +```bash +make infra-update-app-service APP_NAME=app ENVIRONMENT=dev +``` + +Make changes to the application build repository (Note that the build repository is shared across environments, so there is no ENVIRONMENT parameter): + +```bash +make infra-update-app-build-repository APP_NAME=app +``` + +You can also pass in extra arguments to `terraform apply` by using the `TF_CLI_ARGS` or `TF_CLI_ARGS_apply` parameter (see [Terraform's docs on TF_CLI_ARGS and TF_CLI_ARGS_name](https://developer.hashicorp.com/terraform/cli/config/environment-variables#tf_cli_args-and-tf_cli_args_name)): + +```bash +# Example +TF_CLI_ARGS_apply='-input=false -auto-approve' make infra-update-app-service APP_NAME=app ENVIRONMENT=dev +TF_CLI_ARGS_apply='-var=image_tag=abcdef1' make infra-update-app-service APP_NAME=app ENVIRONMENT=dev +``` + +## Using terraform CLI wrapper scripts + +An alternative to using the Makefile is to directly use the terraform wrapper scripts that the Makefile uses: + +```bash +project-root$ ./bin/terraform-init app/service dev +project-root$ ./bin/terraform-apply app/service dev +project-root$ ./bin/terraform-init-and-apply app/service dev # calls init and apply in the same script +``` + +Look in the script files for more details on usage. + +## Using Terraform CLI directly + +Finally, if the wrapper scripts don't meet your needs, you can always run `terraform` directly from the root module directory. You may need to do this if you are running terraform commands other than `terraform plan` and `terraform apply`, such as `terraform import`, `terraform taint`, etc. To do this, you'll need to pass in the appropriate `tfvars` and `tfbackend` files to `terraform init` and `terraform apply`. For example, to make changes to the application's service resources in the dev environment, cd to the `infra/app/service` directory and run: + +```bash +infra/app/service$ terraform init -backend-config=dev.s3.tfbackend +infra/app/service$ terraform apply -var-file=dev.tfvars +``` diff --git a/docs/infra/module-architecture.md b/docs/infra/module-architecture.md new file mode 100644 index 00000000..6777faa9 --- /dev/null +++ b/docs/infra/module-architecture.md @@ -0,0 +1,92 @@ +# Terraform module architecture + +This doc describes how the Terraform modules are structured. Directory structure and layers are documented in the [infrastructure README](/infra/README.md). + +## Approach + +The infrastructure code is organized into: + +- root modules +- child modules + +[Root modules](https://www.terraform.io/language/modules#the-root-module) are modules that are deployed separately from each other, whereas child modules are reusable modules that are called from root modules. To deploy all the resources necessary for a given environment, all the root modules must be deployed independently in the correct order. + +For a full list of rationale and factors, see [ADR: Separate app infrastructure into layers](/docs/decisions/infra/0009-separate-app-infrastructure-into-layers.md). + +## Module calling structure + +The following diagram describes the relationship between modules and their child modules. Arrows go from the caller module to the child module. 
+ +```mermaid +flowchart TB + + classDef default fill:#FFF,stroke:#000 + classDef root-module fill:#F37100,stroke-width:3,font-family:Arial + classDef child-module fill:#F8E21A,font-family:Arial + + subgraph infra + account:::root-module + app/network[network]:::root-module + + subgraph app + app/build-repository[build-repository]:::root-module + app/database[database]:::root-module + app/service[service]:::root-module + end + + subgraph modules + terraform-backend-s3:::child-module + auth-github-actions:::child-module + container-image-repository:::child-module + network:::child-module + database:::child-module + web-app:::child-module + end + + account --> terraform-backend-s3 + account --> auth-github-actions + app/network --> network + app/build-repository --> container-image-repository + app/database --> database + app/service --> web-app + + end +``` + +## Module dependencies + +The following diagram illustrates the dependency structure of the root modules. + +1. Account root modules need to be deployed first to create the S3 bucket and DynamoDB tables that are needed to configure the Terraform backends in the rest of the root modules. +2. The application's build repository needs to be deployed next to create the resources needed to store the built release candidates that are deployed to the application environments. +3. The individual application environment root modules are deployed last once everything else is set up. These root modules are the ones that are deployed regularly as part of application deployments. + +```mermaid +flowchart RL + +classDef default fill:#F8E21A,stroke:#000,font-family:Arial + +app/service --> app/build-repository --> accounts +app/service --> accounts +app/service --> app/network +app/service --> app/database --> app/network --> accounts +app/database --> accounts +``` + +### Guidelines for layers + +When deciding which layer to put an infrastructure resource in, follow these guidelines. + +* **Default to the service layer:** By default, consider putting application resources in the service layer. This way the resource is managed together with everything else in the environment, and spinning up new application environments automatically spins up the resource. + +* **Consider variations in the number and types of environments of each layer:** If the resource does not or might not map one-to-one with application environments, consider putting the resource in a different layer. For example, the number of AWS accounts may or may not match the number of VPCs, which may or may not match the number of application environments. As another example, each application only has one instance of a build repository, which is shared across all environments. As a final example, an application may or may not need a database layer at all, so by putting database-related resources in the database layer, an application can skip those resources by skipping the entire layer rather than by needing to change the behavior of an existing layer. Choose the layer for the resource that maps most closely with that resource's purpose. + +* **Consider AWS uniqueness constraints on resources:** This is a special case of the previous consideration: resources that AWS requires to be unique should be managed by a layer that creates only one of that resource per instance of that layer.
For example, there can only be one OIDC provider for GitHub Actions per AWS account (see [Creating OIDC identity providers](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html)), so the OIDC provider should go in the account layer. As another example, there can only be one VPC endpoint per VPC per AWS service (see [Fix conflicting DNS domain errors for interface VPC endpoints](https://repost.aws/knowledge-center/vpc-interface-endpoint-domain-conflict)). Therefore, if multiple application environments share a VPC, they can't each create a VPC endpoint for the same AWS service. As such, the VPC endpoint logically belongs to the network layer and VPC endpoints should be created and managed per network instance rather than per application environment. + +* **Consider policy constraints on what resources the project team is authorized to manage:** Different categories of resources may have different requirements on who is allowed to create and manage those resources. Resources that the project team is not allowed to manage directly should not be mixed with resources that the project team needs to manage directly. + +* **Consider out-of-band dependencies:** Put infrastructure resources that require steps outside of Terraform to be fully configured in layers that are upstream of the resources that depend on them. For example, after creating a database cluster, the database schemas, roles, and privileges need to be configured before they can be used by a downstream service. Therefore, database resources should be separate from the service layer so that the database can be configured fully before attempting to create the service layer resources. + +## Making changes to infrastructure + +Now that you understand how the modules are structured, see [making changes to infrastructure](./making-infra-changes.md). diff --git a/docs/infra/module-dependencies.md b/docs/infra/module-dependencies.md new file mode 100644 index 00000000..b7903867 --- /dev/null +++ b/docs/infra/module-dependencies.md @@ -0,0 +1,92 @@ +# Managing module dependencies + +These are the principles that guide the design of the infrastructure template. + +## Use explicit outputs and variables to connect resources across child modules in the same root module + +If a resource in module B depends on a resource in module A, and both modules are called from the same root module, then create an output in module A with the information that is needed by module B, and pass that into module B as an input variable. + +```terraform +# root-module/main.tf + +module "a" { + ... +} + +module "b" { + input = module.a.output +} +``` + +This makes the dependencies between the resources explicit: + +```mermaid +flowchart LR + +subgraph A[module A] + output +end + +subgraph B[module B] + input +end + +input -- depends on --> output +``` + +**Do not** use [data sources](https://developer.hashicorp.com/terraform/language/data-sources) to reference resource dependencies in the same root module. A data source does not represent a dependency in [terraform's dependency graph](https://developer.hashicorp.com/terraform/internals/graph), so there is a potential race condition: Terraform will not know that it needs to create/update the resource in module A before it creates/updates the resource in module B that depends on it.
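+ +For illustration, here is a minimal sketch of the discouraged pattern (the resource types, names, and identifier are hypothetical): + +```terraform +# root-module/main.tf + +module "a" { + source = "./modules/a" +} + +# Anti-pattern: this data source queries for the bucket that module "a" +# manages, but Terraform's graph has no edge from module "b" to module "a", +# so module "b" may be created or updated before module "a" is ready. +data "aws_s3_bucket" "a_bucket" { + bucket = "bucket-managed-by-module-a" # hypothetical identifier +} + +module "b" { + source = "./modules/b" + input = data.aws_s3_bucket.a_bucket.arn +} +```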
+ +## Use config modules and data resources to manage dependencies between root modules + +If a resource in root module S depends on a resource in root module R, it is not possible to specify the dependency directly since the resources are managed in separate state files. In this situation, use a [data source](https://developer.hashicorp.com/terraform/language/data-sources) in module S to reference the resource in module R, and use a shared configuration module that specifies identifying information that is used both to create the resource in R and to query for the resource in S. + +```terraform +# root module R + +module "config" { + ... +} + +resource "parent" "p" { + identifier = module.config.parent_identifier +} +``` + +```terraform +# root module S + +module "config" { + ... +} + +data "parent" "p" { + identifier = module.config.parent_identifier +} + +resource "child" "c" { + input = data.parent.p.some_attribute +} +``` + +This makes the dependency explicit, but indirect. Instead of one resource directly depending on the other, both resources depend on shared config values that uniquely identify the parent resource. If the parent resource changes, the data source will also change, triggering the appropriate change in the child resource. Any change to the parent resource's identifying information must be made through the shared configuration module so that the data source's query remains in sync. + +```mermaid +flowchart RL + +subgraph config[config module] + config_value[config value] +end + +subgraph R[root module R] + parent[parent resource] +end + +subgraph S[root module S] + data.parent[parent data source] + child[child resource] +end + +parent -- depends on --> config_value +data.parent -- depends on --> config_value +child -- depends on --> data.parent +``` diff --git a/docs/infra/service-command-execution.md b/docs/infra/service-command-execution.md new file mode 100644 index 00000000..9b5deb16 --- /dev/null +++ b/docs/infra/service-command-execution.md @@ -0,0 +1,76 @@ +# Running commands on the service + +The infrastructure supports developer access to a running application's service container using [ECS Exec](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html). You can run commands in or get a shell to an actively running container, allowing you to quickly debug issues or to use the container to access an attached database. Once you create an interactive shell, you will be operating with the same permissions as the container (e.g. you may access any database the container has access to, but you cannot access databases within the same account that the container does not have access to). + +⚠️ **Warning: It is not recommended to enable service access in a production environment!** + +## Prerequisites + +* You'll need to have [set up infrastructure tools](./set-up-infrastructure-tools.md), like Terraform, AWS CLI, and AWS authentication +* You'll need to have set up the [app environments](./set-up-app-env.md) +* You'll need to have [installed the Session Manager plugin for the AWS CLI](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) + +## Instructions + +### 1. Make sure you're authenticated into the AWS account that the ECS container is running in + +Commands take effect in whatever account you're authenticated into.
To see which account that is, run + +```bash +aws sts get-caller-identity +``` + +To see a more human-readable account alias instead of the account, run + +```bash +aws iam list-account-aliases +``` + +### 2. Enable service execution access + +Within the `app-config` directory (e.g. `infra/<APP_NAME>/app-config`), each environment has its own config file named after the environment. For example, if the application has three environments `dev`, `staging`, and `prod`, it should have corresponding `dev.tf`, `staging.tf`, and `prod.tf` files. + +In the environment config file for the environment in which you want to enable service access, set `enable_command_execution` to `true`. + +### 3. Update the network + +To enable service execution access, the VPC requires an additional VPC endpoint. Update the network by running + +```bash +make infra-update-network NETWORK_NAME=<NETWORK_NAME> +``` + +`NETWORK_NAME` needs to be the name of the network that the application environment is running in. + +### 4. Update the application service + +To enable service execution access, some configuration changes need to be applied to the ECS Task Definition. Update the service by running + +```bash +make infra-update-app-service APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT> +``` + +`APP_NAME` needs to be the name of the application folder within the `infra` folder. + +`ENVIRONMENT` needs to be the name of the environment to update. + +### 5. Execute commands + +To create an interactive shell, run + +```bash +aws ecs execute-command --cluster <CLUSTER_NAME> \ + --task <TASK_ID> \ + --container <CONTAINER_NAME> \ + --interactive \ + --command "/bin/sh" +``` + +To run other commands, modify the `--command` flag to execute the command, rather than starting a shell. + +## Troubleshooting + +If you get an error after running the above steps, these diagnosis steps may be helpful: + +1. Verify that `enableExecuteCommand` is `true` on your running task by using `aws ecs describe-tasks --cluster $APP_NAME-$ENVIRONMENT_NAME --task <TASK_ID>`. If not, run the `infra-update-app-service` command above and/or redeploy your service. +2. Make sure that the SSM Agent is running by checking the `managedAgents` object in the `containers` array of the `aws ecs describe-tasks` command output. If it is `STOPPED`, you may have an issue with your container that is preventing the agent from running. +3. Run the [amazon-ecs-exec-checker](https://github.com/aws-containers/amazon-ecs-exec-checker) script to further pinpoint issues that may prevent ECS Exec from functioning. diff --git a/docs/infra/set-up-app-build-repository.md b/docs/infra/set-up-app-build-repository.md new file mode 100644 index 00000000..eb30bb49 --- /dev/null +++ b/docs/infra/set-up-app-build-repository.md @@ -0,0 +1,40 @@ +# Set up application build repository + +The application build repository setup process will create infrastructure resources needed to store built release-candidate artifacts used to deploy the application to an environment. + +## Requirements + +Before setting up the application's build repository you'll need to have: + +1. [Set up the AWS account](./set-up-aws-account.md) +2. [Configure the app](/infra/app/app-config/main.tf) + +## 1. Configure backend + +To create the `tfbackend` file for the build repository using the backend configuration values from your current AWS account, run + +```bash +make infra-configure-app-build-repository APP_NAME=app +``` + +Pass in the name of the app folder within `infra`. By default this is `app`.
+ +## 2. Create build repository resources + +Now run the following command to create the resources, making sure to verify the plan before confirming the apply. + +```bash +make infra-update-app-build-repository APP_NAME=app +``` + +## 3. Check that the build repository was created properly + +Run the [Build and publish GitHub Actions workflow](/.github/workflows/build-and-publish.yml) to build your application's image and publish it to the container image registry you just created. If you have the GitHub CLI installed, you can do this using the following command: + +```bash +gh workflow run build-and-publish.yml --field app_name=app --field ref=main +``` + +## Set up application environments + +Once you set up the deployment process, you can proceed to [set up application environments](./set-up-app-env.md). diff --git a/docs/infra/set-up-app-env.md b/docs/infra/set-up-app-env.md new file mode 100644 index 00000000..23650a97 --- /dev/null +++ b/docs/infra/set-up-app-env.md @@ -0,0 +1,64 @@ +# Set up Application Environment + +The application environment setup process will: + +1. Configure a new application environment and create the infrastructure resources for the application in that environment + +## Requirements + +Before setting up the application's environments you'll need to have: + +1. [A compatible application in the app folder](https://github.com/navapbc/template-infra/blob/main/template-only-docs/application-requirements.md) +2. [Set up the AWS account that this environment is going to use](/docs/infra/set-up-aws-account.md). +3. [Configure the app](/infra/app/app-config/main.tf). + 1. Make sure you update `has_database` to `true` or `false` depending on whether or not your application has a database to integrate with. + 2. Make sure you update `has_external_non_aws_service` to `true` or `false` depending on whether your application utilizes any non-AWS services. Other applications within the same git repo count as external services, so if your application makes API calls to another application service in the same git repo, set `has_external_non_aws_service` to `true`. + 3. If you're configuring your production environment, make sure to update the `service_cpu`, `service_memory`, and `service_desired_instance_count` settings based on the project's needs. If your application is sensitive to performance, consider doing a load test. + 4. Make sure your application environment is using the AWS Account you want to use by checking the `account_name` property in the environment configuration and updating it if necessary. +4. [Create a nondefault VPC to be used by the application](./set-up-network.md) +5. (If the application has external non-AWS services) [Set up network access to the public internet](./set-up-public-internet-access.md) +6. (If the application has a database) [Set up the database for the application](./set-up-database.md) +7. (If you have an incident management service) [Set up monitoring](./set-up-monitoring-alerts.md) +8. [Set up the application build repository](./set-up-app-build-repository.md) + +## 1. Configure backend + +To create the `tfbackend` and `tfvars` files for the new application environment, run + +```bash +make infra-configure-app-service APP_NAME=app ENVIRONMENT=<ENVIRONMENT> +``` + +`APP_NAME` needs to be the name of the application folder within the `infra` folder. It defaults to `app`. +`ENVIRONMENT` needs to be the name of the environment you are creating. This will create a file called `<ENVIRONMENT>.s3.tfbackend` in the `infra/app/service` module directory.
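+ +For reference, the generated `tfbackend` file is just a set of [S3 backend](https://www.terraform.io/language/settings/backends/s3) arguments; a sketch with illustrative values: + +```terraform +# dev.s3.tfbackend (illustrative values; the actual names are generated +# from your project and account configuration) +bucket = "<PROJECT_NAME>-<ACCOUNT_ID>-<REGION>-tf" +key = "infra/app/service/dev.tfstate" +region = "us-east-1" +dynamodb_table = "<PROJECT_NAME>-tf-state-locks" +```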
+ +Depending on the value of `has_database` in the [app-config module](/infra/app/app-config/main.tf), the application will be configured with or without database access. + +## 2. Build and publish the application to the application build repository + +Before creating the application resources, you'll need to first build and publish at least one image to the application build repository. + +There are two ways to do this: + +1. Trigger the "Build and Publish" workflow from your repo's GitHub Actions tab. This option requires that the `role-to-assume` GitHub workflow variable has already been set up as part of the overall infra account setup process. +2. Alternatively, run the following from the root directory. This option can take much longer than the GitHub workflow, depending on your machine's architecture. + + ```bash + make release-build APP_NAME=app + make release-publish APP_NAME=app + ``` + +Copy the image tag name that was published. You'll need this in the next step. + +## 3. Create application resources with the image tag that was published + +Now run the following command to create the resources, using the image tag that was published in the previous step. Review the terraform before confirming "yes" to apply the changes. + +```bash +TF_CLI_ARGS_apply="-var=image_tag=<IMAGE_TAG>" make infra-update-app-service APP_NAME=app ENVIRONMENT=<ENVIRONMENT> +``` + +## 4. Configure monitoring alerts + +Configure email alerts, external incident management service integration, and additional CloudWatch alerts. +[Configure monitoring module](./set-up-monitoring-alerts.md) diff --git a/docs/infra/set-up-aws-account.md b/docs/infra/set-up-aws-account.md new file mode 100644 index 00000000..dd849561 --- /dev/null +++ b/docs/infra/set-up-aws-account.md @@ -0,0 +1,50 @@ +# Set up AWS account + +The AWS account setup process will: + +1. Create the [Terraform backend](https://www.terraform.io/language/settings/backends/configuration) resources needed to store Terraform's infrastructure state files. The project uses an [S3 backend](https://www.terraform.io/language/settings/backends/s3). +2. Create the [OpenID connect provider in AWS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html) to allow GitHub Actions to access AWS account resources. +3. Create the IAM role and policy that GitHub Actions will use to manage infrastructure resources. + +## Prerequisites + +* You'll need to have [set up infrastructure tools](./set-up-infrastructure-tools.md), like Terraform, AWS CLI, and AWS authentication. +* You'll also need to make sure the [project is configured](/infra/project-config/main.tf). + +## Instructions + +### 1. Make sure you're authenticated into the AWS account you want to configure + +The account setup configures whatever account you're authenticated into. To see which account that is, run + +```bash +aws sts get-caller-identity +``` + +To see a more human-readable account alias instead of the account, run + +```bash +aws iam list-account-aliases +``` + +### 2. Create backend resources and tfbackend config file + +Run the following command, replacing `<ACCOUNT_NAME>` with a human-readable name for the AWS account that you're authenticated into. The account name will be used to prefix the created tfbackend file so that it's easier to visually identify as opposed to identifying the file using the account id. For example, if you have an account per environment, the account name can be the name of the environment (e.g. "prod" or "staging").
Or if you are setting up an account for all lower environments, the account name can be "lowers". If your AWS account has an account alias, you can also use that. + +```bash +make infra-set-up-account ACCOUNT_NAME=<ACCOUNT_NAME> +``` + +This command will create the S3 tfstate bucket and the GitHub OIDC provider. It will also create a `[account name].[account id].s3.tfbackend` file in the `infra/accounts` directory. + +## Making changes to the account + +If you make changes to the account terraform and want to apply those changes, run + +```bash +make infra-update-current-account +``` + +## Destroying infrastructure + +To undeploy and destroy infrastructure, see [instructions on destroying infrastructure](./destroy-infrastructure.md). diff --git a/docs/infra/set-up-custom-domains.md b/docs/infra/set-up-custom-domains.md new file mode 100644 index 00000000..e4ff8474 --- /dev/null +++ b/docs/infra/set-up-custom-domains.md @@ -0,0 +1,73 @@ +# Custom domains + +Production systems will want to set up custom domains to route internet traffic to their application services rather than using AWS-generated hostnames for the load balancers or the CDN. This document describes how to configure custom domains. The custom domain setup process will: + +1. Create an [Amazon Route 53 hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html) to manage DNS records for a domain and subdomains +2. Create DNS A (address) records to route traffic from a custom domain to the application's load balancer + +## Requirements + +Before setting up custom domains you'll need to have [set up the AWS account](./set-up-aws-account.md). + +## 1. Set hosted zone in domain configuration + +Update the value for the `hosted_zone` in the domain configuration. The custom domain configuration is defined as a `domain_config` object in the [network section of the project config module](/infra/project-config/networks.tf). A [hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html) represents a domain and all of its subdomains. For example, a hosted zone of `platform-test.navateam.com` includes `platform-test.navateam.com`, `cdn.platform-test.navateam.com`, `notifications.platform-test.navateam.com`, `foo.bar.platform-test.navateam.com`, etc. + +## 2. Update the network layer to create the hosted zone + +Run the following command to create the hosted zone specified in the domain configuration. + +```bash +make infra-update-network NETWORK_NAME=<NETWORK_NAME> +``` + +## 3. Delegate DNS requests to the newly created hosted zone + +You most likely registered your domain outside of this project. Using whichever service you used to register the domain name (e.g. Namecheap, GoDaddy, Google Domains, etc.), add a DNS NS (nameserver) record. Set the "name" equal to the `hosted_zone` and set the value equal to the list of hosted zone name servers that was created in the previous step. You can see the list of servers by running + +```bash +terraform -chdir=infra/networks output -json hosted_zone_name_servers +``` + +Your NS record might look something like this: + +**Name**: + +```text +platform-test.navateam.com +``` + +**Value**: (Note the periods after each of the server addresses) + +```text +ns-1431.awsdns-50.org. +ns-1643.awsdns-13.co.uk. +ns-687.awsdns-21.net. +ns-80.awsdns-10.com. +``` + +Run the following command to verify that DNS requests are being served by the hosted zone nameservers, using `nslookup`: + +```bash +nslookup -type=NS <HOSTED_ZONE> +```
+ +## 4. Configure custom domain for your application + +Define the `domain_name` for each of the application environments in the `app-config` module. The `domain_name` must be either the same as the `hosted_zone` or a subdomain of the `hosted_zone`. For example, if your hosted zone is `platform-test.navateam.com`, then `platform-test.navateam.com` and `cdn.platform-test.navateam.com` are both valid values for `domain_name`. + +## 5. Create A (address) records to route traffic from the custom domain to your application's load balancer + +Run the following command to create the A record that routes traffic from the custom domain to the application's load balancer. + +```bash +make infra-update-app-service APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT> +``` + +## 6. Repeat for each application + +If you have multiple applications in the same network, repeat steps 4 and 5 for each application. + +## Externally managed DNS + +If you plan to manage DNS records outside of the project, then set `network_configs[*].domain_config.manage_dns = false` in [the networks section of the project-config module](/infra/project-config/networks.tf). diff --git a/docs/infra/set-up-database.md b/docs/infra/set-up-database.md new file mode 100644 index 00000000..1262cb68 --- /dev/null +++ b/docs/infra/set-up-database.md @@ -0,0 +1,110 @@ +# Set up database + +The database setup process will: + +1. Configure and deploy an application database cluster using [Amazon Aurora Serverless V2](https://aws.amazon.com/rds/aurora/serverless/) +2. Create a [PostgreSQL schema](https://www.postgresql.org/docs/current/ddl-schemas.html) `app` to contain tables used by the application. +3. Create an IAM policy that allows IAM roles with that policy attached to [connect to the database using IAM authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.Connecting.html) +4. Create an [AWS Lambda function](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html), the "role manager", for provisioning the [PostgreSQL database users](https://www.postgresql.org/docs/8.0/user-manag.html) that will be used by the application service and by the migrations task. +5. Invoke the role manager function to create the `app` and `migrator` Postgres users. + +## Requirements + +Before setting up the database you'll need to have: + +1. [Set up the AWS account](./set-up-aws-account.md) +2. pip installed (pip is needed to download dependencies for the role manager Lambda function) + +## 1. Configure backend + +To create the `tfbackend` file for the new application environment, run + +```bash +make infra-configure-app-database APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT> +``` + +`APP_NAME` needs to be the name of the application folder within the `infra` folder. By default, this is `app`. +`ENVIRONMENT` needs to be the name of the environment you are creating. This will create a file called `<ENVIRONMENT>.s3.tfbackend` in the `infra/app/database` module directory. + +### (Optional) Enable any database extensions that require `rds_superuser` + +Enabling some extensions, such as [pgvector](https://github.com/pgvector/pgvector), requires the `rds_superuser` role. You can enable or disable any such extensions via the `superuser_extensions` configuration variable. + +For example, to enable the pgvector extension: + +```terraform +# infra/app/app-config/env-config/main.tf + +database_config = { + ...
+ + superuser_extensions = { + "vector" : true, + } +} +``` + +Note that this should only be used for extensions that require the `rds_superuser` role. For many extensions, you can (and should) instead enable them as part of your application's standard database migrations. This [list of trusted extensions from AWS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts.General.Extensions.Trusted) shows which extensions can be enabled via a database migration. + +If you're not sure whether you need to do anything here, you can skip this and come back to it later. + +## 2. Create database resources + +Now run the following command to create the resources. Review the terraform before confirming "yes" to apply the changes. This can take over 5 minutes. + +```bash +make infra-update-app-database APP_NAME=app ENVIRONMENT=<ENVIRONMENT> +``` + +## 3. Create Postgres users + +Trigger the role manager Lambda function that was created in the previous step to create the `app` and `migrator` Postgres users. + +```bash +make infra-update-app-database-roles APP_NAME=app ENVIRONMENT=<ENVIRONMENT> +``` + +The Lambda function's response should describe the resulting PostgreSQL roles and groups that are configured in the database. It should look like a minified version of the following: + +```json +{ + "roles": [ + "postgres", + "migrator", + "app" + ], + "roles_with_groups": { + "rds_superuser": "rds_password", + "pg_monitor": "pg_read_all_settings,pg_read_all_stats,pg_stat_scan_tables", + "postgres": "rds_superuser", + "app": "rds_iam", + "migrator": "rds_iam" + }, + "schema_privileges": { + "public": "{postgres=UC/postgres,=UC/postgres}", + "app": "{migrator=UC/migrator,app=U/migrator}" + } +} +``` + +### Important note on Postgres table permissions + +Before creating migrations that create tables, first create a migration that includes the following SQL command (or equivalent if your migrations are written in a general-purpose programming language): + +```sql +ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO app +``` + +This will cause all future tables created by the `migrator` user to automatically be accessible by the `app` user. See the [Postgres docs on ALTER DEFAULT PRIVILEGES](https://www.postgresql.org/docs/current/sql-alterdefaultprivileges.html) for more info. As an example, see the example app's migrations file [migrations.sql](https://github.com/navapbc/template-infra/blob/main/app/migrations.sql). + +Why is this needed? The reason is that the `migrator` role will be used by the migration task to run database migrations (creating tables, altering tables, etc.), while the `app` role will be used by the web service to access the database. Moreover, in Postgres, new tables won't automatically be accessible by roles other than the creator unless specifically granted, even if those other roles have usage access to the schema that the tables are created in. In other words, if the `migrator` user created a new table `foo` in the `app` schema, the `app` user will not automatically be able to access it by default.
+ +## 4. Check that database roles have been configured properly + +```bash +make infra-check-app-database-roles APP_NAME=app ENVIRONMENT=<ENVIRONMENT> +``` + +## Set up application environments + +Once you set up the deployment process, you can proceed to [set up the application service](./set-up-app-env.md). diff --git a/docs/infra/set-up-infrastructure-tools.md b/docs/infra/set-up-infrastructure-tools.md new file mode 100644 index 00000000..8e59d60a --- /dev/null +++ b/docs/infra/set-up-infrastructure-tools.md @@ -0,0 +1,108 @@ +# Set up infrastructure developer tools + +If you are contributing to infrastructure, you will need to complete these setup steps. + +## Prerequisites + +### Install Terraform + +[Terraform](https://www.terraform.io/) is an infrastructure as code (IaC) tool that allows you to build, change, and version infrastructure safely and efficiently. This includes both low-level components like compute instances, storage, and networking, as well as high-level components like DNS entries and SaaS features. + +Different projects may require different versions of Terraform, so you'll want a way to manage multiple versions. The best way to manage Terraform versions is with [Terraform Version Manager](https://github.com/tfutils/tfenv). + +To install via [Homebrew](https://brew.sh/): + +```bash +brew install tfenv +``` + +Then install the version of Terraform you need. + +```bash +tfenv install 1.8.0 +``` + +You may need to set the Terraform version. + +```bash +tfenv use 1.8.0 +``` + +If you are unfamiliar with Terraform, check out this [basic introduction to Terraform](./intro-to-terraform.md). + +### Install AWS CLI + +The [AWS Command Line Interface (AWS CLI)](https://aws.amazon.com/cli/) is a unified tool to manage your AWS services. With just one tool to download and configure, you can control multiple AWS services from the command line and automate them through scripts. Install the AWS command line tool by following the instructions found here: + +- [Install AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) + +### Install Go + +The [Go programming language](https://go.dev/dl/) is required to run [Terratest](https://terratest.gruntwork.io/), the unit test framework for Terraform. + +### Install GitHub CLI + +The [GitHub CLI](https://cli.github.com/) is useful for automating certain operations for GitHub such as with GitHub Actions. This is needed to run [check-github-actions-auth](/bin/check-github-actions-auth). + +```bash +brew install gh +``` + +### Install linters + +We have several optional utilities for running infrastructure linters locally. These are run as part of the CI pipeline, so it is often simpler to test them locally first. + +- [Shellcheck](https://github.com/koalaman/shellcheck) +- [actionlint](https://github.com/rhysd/actionlint) +- [markdown-link-check](https://github.com/tcort/markdown-link-check) + +```bash +brew install shellcheck +brew install actionlint +make infra-lint +``` + +## AWS Authentication + +In order for Terraform to authenticate with your accounts, you will need to configure your AWS credentials using the AWS CLI or manually create your config and credentials file. If you need to manage multiple credentials or create named profiles for use with different environments, you can add the `--profile` option. + +There are multiple ways to authenticate, but we recommend creating a separate profile for your project in your AWS credentials file and setting your local environment variable `AWS_PROFILE` to the profile name. We recommend using [direnv](https://direnv.net/) to manage local environment variables.
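+ +For example, with direnv you might drop a one-line `.envrc` file in the project root (the profile name is illustrative): + +```bash +# .envrc: loaded automatically by direnv when you cd into the project +export AWS_PROFILE=myproject-dev +```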
**Credentials should be located in ~/.aws/credentials** (Linux & Mac) or **%USERPROFILE%\.aws\credentials** (Windows) + +### Examples + +```bash +$ aws configure +AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE +AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +Default region name [None]: us-east-2 +Default output format [None]: json +``` + +**Using the above command will create a [default] profile.** + +```bash +$ aws configure --profile dev +AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE +AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +Default region name [None]: us-east-2 +Default output format [None]: json +``` + +**Using the above command will create a [dev] profile.** + +Once you're done, verify access by running the following command to print out information about the AWS IAM user you authenticated as. + +```bash +aws sts get-caller-identity +``` + +### References + +- [Configuration basics][1] +- [Named profiles for the AWS CLI][2] +- [Configuration and credential file settings][3] + +[1]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html +[2]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html +[3]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html diff --git a/docs/infra/set-up-monitoring-alerts.md b/docs/infra/set-up-monitoring-alerts.md new file mode 100644 index 00000000..5dd08b14 --- /dev/null +++ b/docs/infra/set-up-monitoring-alerts.md @@ -0,0 +1,29 @@ +# Set up monitoring notifications + +## Overview + +The monitoring module defines metric-based alerting policies that provide awareness into issues with the cloud application. The module supports integration with external incident management tools like Splunk-On-Call or PagerDuty. It also supports email alerts. + +### Set up email alerts + +1. Add the `email_alerts_subscription_list` variable to the monitoring module call in the service layer. + +For example: +```terraform +module "monitoring" { + source = "../../modules/monitoring" + email_alerts_subscription_list = ["email1@email.com", "email2@email.com"] + ... +} +``` +2. Run `make infra-update-app-service APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT>` to apply the changes to each environment. +When any of the alerts described by the module are triggered, a notification will be sent to all emails specified in the `email_alerts_subscription_list`. + +### Set up external incident management service integration + +1. Set `has_incident_management_service = true` in `app-config/main.tf`. +2. Get the integration URL for the incident management service and store it in AWS SSM Parameter Store by running the following command for each environment: +```bash +make infra-configure-monitoring-secrets APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT> URL=<INTEGRATION_URL> +``` +3. Run `make infra-update-app-service APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT>` to apply the changes to each environment. diff --git a/docs/infra/set-up-network.md b/docs/infra/set-up-network.md new file mode 100644 index 00000000..f5b7e3c5 --- /dev/null +++ b/docs/infra/set-up-network.md @@ -0,0 +1,40 @@ +# Set up network + +The network setup process will configure and deploy network resources needed by other modules. In particular, it will: + +1. Create a nondefault VPC +2. Create public subnets for publicly accessible resources such as the application load balancer, private subnets for the application service, and private subnets for the database.
+3. Create VPC endpoints for the AWS services needed by ECS Fargate to fetch the container image and log to AWS CloudWatch. If your application has a database, it will also create VPC endpoints for the AWS services needed by the database layer and a security group to contain those VPC endpoints. + +## Requirements + +Before setting up the network you'll need to have: + +1. [Set up the AWS account](./set-up-aws-account.md) +2. Optionally adjust the configuration for the networks you want to have on your project in the [project-config module](/infra/project-config/networks.tf). By default, there are three networks defined, one for each application environment. If you have multiple apps and want your applications in separate networks, you may want to give the networks differentiating names (e.g. "foo-dev", "foo-prod", "bar-dev", "bar-prod", instead of just "dev", "prod"). + 1. Optionally, [configure custom domains](/docs/infra/set-up-custom-domains.md). You can also come back to setting up custom domains at a later time. + 2. Optionally, [configure HTTPS support](/docs/infra/https-support.md). You can also come back to setting up HTTPS support at a later time. +3. [Configure the app](/infra/app/app-config/main.tf) (a sketch of the relevant settings appears at the end of this document). + 1. Update `has_database` to `true` or `false` depending on whether or not your application has a database to integrate with. This setting determines whether or not to create VPC endpoints needed by the database layer. + 2. Update `has_external_non_aws_service` to `true` or `false` depending on whether or not your application makes calls over the public internet. Set this to `true` (a) if your application makes calls to a SaaS service, or (b) if your application needs to call services from another application in the same git repo. This setting determines whether or not to create NAT gateways, which allows the service in the private subnet to make requests to the internet. For more information, see [set up network access to the public internet](./set-up-public-internet-access.md). + 3. If you made changes to the configuration of the networks in the optional step 2 above and/or to the default application environments, update `network_name` for your application environments. This mapping ensures that each network is configured appropriately based on the application(s) in that network (see `local.apps_in_network` in [/infra/networks/main.tf](/infra/networks/main.tf)). Failure to set the network name properly means that the network layer may not receive the correct application configurations for `has_database` and `has_external_non_aws_service`. + +## 1. Configure backend + +To create the `tfbackend` file for the new network, run + +```bash +make infra-configure-network NETWORK_NAME=<NETWORK_NAME> +``` + +## 2. Create network resources + +Now run the following command to create the resources. Review the terraform before confirming "yes" to apply the changes. + +```bash +make infra-update-network NETWORK_NAME=<NETWORK_NAME> +``` + +## Updating the network + +If you make changes to your application's configuration that impact the network (such as `has_database` and `has_external_non_aws_service`), make sure to update the network before you update or deploy subsequent infrastructure layers.
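+ +As referenced in the requirements above, here is a rough sketch of the app configuration settings that drive what the network layer creates (the exact layout of the `app-config` module may differ; values are illustrative): + +```terraform +# infra/app/app-config/main.tf (illustrative excerpt) + +locals { + # When true, the network layer creates the VPC endpoints needed by the database layer + has_database = true + + # When true, the network layer creates NAT gateways for outbound internet access + has_external_non_aws_service = false +} +```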
diff --git a/docs/infra/set-up-public-internet-access.md b/docs/infra/set-up-public-internet-access.md
new file mode 100644
index 00000000..58a9f574
--- /dev/null
+++ b/docs/infra/set-up-public-internet-access.md
+# Public internet access
+
+Some applications depend on external services that are not provided directly by AWS. External services include:
+
+1. Software as a service (SaaS) providers like New Relic
+2. Custom API applications in the same git repository
+
+Applications that depend on external services need access to the public internet via a NAT (Network Address Translation) gateway. This document describes how to configure public internet access for your application. The setup process will:
+
+1. Create a [NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html) for each availability zone in your virtual network
+
+Note: For services that are provided directly by AWS, you can either reach them over the public internet by enabling public internet access, or use [VPC endpoints](https://docs.aws.amazon.com/whitepapers/latest/aws-privatelink/what-are-vpc-endpoints.html) to keep network traffic entirely within the VPC.
+
+## 1. Configure `has_external_non_aws_service` property in app-config module
+
+In the `infra/<APP_NAME>/app-config` module, set `has_external_non_aws_service` to `true`.
+
+## 2. Create or update the network
+
+If you are creating new network(s), follow the instructions in [set up network](./set-up-network.md).
+
+If you are updating existing networks, run the following command for each network used by your application's environments (look at `network_name` for each environment in your application's `app-config` module).
+
+```bash
+make infra-update-network NETWORK_NAME=<NETWORK_NAME>
+```
+
+## 3. Check that your application can access the internet
+
+Check that your application can access the internet. If your application already has an endpoint or background job that calls the internet, you can exercise that code path without needing to re-deploy the application. If not, you can test internet access by introducing a simple endpoint that accesses some public URL (e.g. google.com).
+
+Repeat this step for each application environment.
diff --git a/docs/infra/style-guide.md b/docs/infra/style-guide.md
new file mode 100644
index 00000000..d39fd452
--- /dev/null
+++ b/docs/infra/style-guide.md
+# Style guide
+
+## Table of contents
+
+- [Style guide](#style-guide)
+  - [Table of contents](#table-of-contents)
+  - [Terraform code style](#terraform-code-style)
+    - [Exceptions and additions to Hashicorp's Terraform style guide](#exceptions-and-additions-to-hashicorps-terraform-style-guide)
+      - [Modules](#modules)
+      - [Variables](#variables)
+      - [.gitignore](#gitignore)
+      - [Integration and unit testing](#integration-and-unit-testing)
+      - [Policy](#policy)
+  - [Shell script style](#shell-script-style)
+
+## Terraform code style
+
+Follow [Hashicorp's Terraform style guide](https://developer.hashicorp.com/terraform/language/style) when writing Terraform code, with a few exceptions (see below).
+
+### Exceptions and additions to Hashicorp's Terraform style guide
+
+Here are some exceptions (and additions) to Hashicorp's Terraform style guide.
+
+#### Modules
+
+- Use module names based on the logical function of the module rather than the underlying proprietary service used for implementing the module. For example, use "database" instead of "rds", or "storage" instead of "s3".
+- Organize resources according to the infrastructure layers described in [module architecture](/docs/infra/module-architecture.md).
+- [Use shared configuration](/docs/infra/module-dependencies.md) instead of the [tfe_outputs data source](https://registry.terraform.io/providers/hashicorp/tfe/latest/docs/data-sources/outputs) to share state between two state files.
+
+#### Variables
+
+- Include additional type information in string variable names to clarify the value being stored. For example, use `access_policy_arn` instead of `access_policy`. Common examples of suffixes include: `_id`, `_arn`, and `_name`.
+- Include units in numerical variable names. For example, use `max_request_seconds` instead of `max_request_time`.
+- Use plural nouns for lists. For example, use `subnet_ids` to represent a list of subnet ids.
+- Use `values_by_key` for maps that map keys to values. For example, use `account_ids_by_name` to represent a map from account names to account ids.
+- For boolean feature flags, use the prefix `enable_`, as in `enable_https`.
+
+#### .gitignore
+
+- Do not commit the `.terraform.lock.hcl` dependency lock file. As of Feb 2023, Terraform lock files, while well intentioned, have a tendency to get into a confusing state that requires recreating the lock file, which defeats the purpose. Moreover, lock files are per environment, which can make it difficult for people to upgrade dependencies (e.g. upgrade an AWS provider) across environments if certain environments are locked down (e.g. production).
+
+#### Integration and unit testing
+
+- For testing, use [Terratest](https://terratest.gruntwork.io/docs/) instead of the [Terraform test framework](https://developer.hashicorp.com/terraform/language/tests).
+
+#### Policy
+
+- For policy enforcement and compliance checks, [tfsec](https://github.com/aquasecurity/tfsec) is used instead of [Terraform's policy enforcement framework](https://developer.hashicorp.com/terraform/cloud-docs/policy-enforcement).
+
+## Shell script style
+
+Follow [Google's Shell Style Guide](https://google.github.io/styleguide/shellguide.html).
diff --git a/docs/infra/upgrade-database.md b/docs/infra/upgrade-database.md
new file mode 100644
index 00000000..0c99a204
--- /dev/null
+++ b/docs/infra/upgrade-database.md
+# Upgrade database
+
+Upgrading the database between major versions (e.g., from Postgres 15 to 16) is a two-step process.
+
+1. Create a new DBParameterGroup for the new engine version and upgrade the database.
+2. Remove the old DBParameterGroup for the prior engine version.
+
+These steps are a minimal starting point for the changes you'll need to make. As with any major change to your codebase, you should carefully test the impact of upgrading the database before applying it to a production environment. See also the AWS documentation for [Upgrading the PostgreSQL DB engine for Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.PostgreSQL.html#USER_UpgradeDBInstance.PostgreSQL.MajorVersion.Process).
+
+## 1. Creating a new DBParameterGroup and upgrading the database
+
+1. Set `allow_major_version_upgrade = true`
+
+Set this on the `aws_rds_cluster` resource in [infra/modules/database/main.tf#L20](../../infra/modules/database/main.tf).
+
+2. (If needed) Update the `serverlessv2_scaling_configuration`
+
+Set the `min_capacity` to 4.0 (and adjust the `max_capacity` accordingly), as sketched below.
+If your minimum is lower than this, the upgrade will fail with `FATAL: shared memory segment sizes are configured too large`.
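+
+Taken together, steps 1 and 2 amount to a change along the following lines in `infra/modules/database/main.tf`. This is a sketch rather than the exact diff; the capacity values are illustrative:
+
+```terraform
+resource "aws_rds_cluster" "db" {
+  # ... existing configuration ...
+
+  # Step 1: permit the major version jump
+  allow_major_version_upgrade = true
+
+  serverlessv2_scaling_configuration {
+    # Step 2: the upgrade fails below 4.0 with the shared memory error above
+    min_capacity = 4.0
+    max_capacity = 8.0 # illustrative; keep this >= min_capacity
+  }
+}
+```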
+
+3. Create a new DBParameterGroup
+
+The database will need access to a new parameter group as part of the upgrade, but the old parameter group can't be deleted until the upgrade is complete.
+
+Make a copy of the `rds_query_logging` resource.
+In the original, replace the `${local.engine_major_version}` variable with your current database version.
+Then, in the duplicate version, modify the resource name to a new unique value.
+
+E.g., if you were moving from Postgres 14 to Postgres 15, your configuration would look like:
+
+```terraform
+# This is the original; note that we pin the family to v14 manually since,
+# after the changes are applied, the new engine major version will be 15.
+resource "aws_rds_cluster_parameter_group" "rds_query_logging" {
+  family = "aurora-postgresql14"
+
+  ...
+}
+
+# This is the new parameter group; we have given it a new name to distinguish it.
+resource "aws_rds_cluster_parameter_group" "rds_query_logging_15" {
+  family = "aurora-postgresql${local.engine_major_version}"
+
+  ...
+}
+```
+
+Modify the `db_cluster_parameter_group_name` to reference this new parameter group:
+
+```terraform
+resource "aws_rds_cluster" "db" {
+  ...
+  db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.rds_query_logging_15.name
+  ...
+}
+```
+
+4. Set the `engine_version` to your new desired version.
+
+5. Run `make infra-update-app-database APP_NAME=<APP_NAME> ENVIRONMENT=<ENVIRONMENT>`
+
+Note that the upgrade is not applied immediately; it is queued for the next maintenance window.
+
+If you wish to apply the upgrade immediately, you can manually change the engine version to match in the AWS Console. See also:
+
+  - https://developer.hashicorp.com/terraform/tutorials/aws/aws-rds
+  - https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html
+
+## 2. Removing the old DBParameterGroup
+
+Once the upgrade has been applied, you can remove the old parameter group.
+
+You should also remove `allow_major_version_upgrade = true` (or set it to `false`).
+
+If you had to increase your autoscaling settings to support the upgrade, you may wish to revert that change now as well.
+
+Finally, the new DBParameterGroup will have a new resource name (e.g., in the example above, `rds_query_logging_15`). You can revert this to the original name (`rds_query_logging`) without modifying the infrastructure by using [Terraform's moved block](https://developer.hashicorp.com/terraform/cli/state/move), e.g.:
+
+```terraform
+moved {
+  from = aws_rds_cluster_parameter_group.rds_query_logging_15
+  to   = aws_rds_cluster_parameter_group.rds_query_logging
+}
+```
\ No newline at end of file
diff --git a/docs/infra/vulnerability-management.md b/docs/infra/vulnerability-management.md
new file mode 100644
index 00000000..b2b2d928
--- /dev/null
+++ b/docs/infra/vulnerability-management.md
+# Vulnerability Management for Docker Images
+
+This repository contains a GitHub workflow that allows you to scan Docker images for vulnerabilities. The workflow, named `ci-vulnerability-scans`, is located in the directory `.github/workflows`. The goal of scanning the image before pushing it to the repository is to catch any vulnerabilities before the image is deployed: ECR scanning takes time, and an image can still be used even when Inspector has found vulnerabilities in it. Also, if you use `scratch` as a base image, ECR is unable to scan the image when it is pushed, which is a known issue.
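+
+For reference, scan-on-push is also enabled on the project's ECR repository itself (see `infra/modules/container-image-repository/main.tf`), which is where the Inspector findings mentioned above come from. A minimal sketch, with an illustrative repository name (the full module additionally configures encryption, lifecycle, and access policies):
+
+```terraform
+resource "aws_ecr_repository" "app" {
+  name                 = "my-app" # illustrative
+  image_tag_mutability = "IMMUTABLE"
+
+  image_scanning_configuration {
+    scan_on_push = true # ECR scans each image as it is pushed
+  }
+}
+```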
+
+To keep the surface area for vulnerabilities small, follow this method of building images:
+- Build a base image with the required packages; name it something like `build`
+- Configure the app build from the image in the previous step; name it something like `app-build`
+- Create a final image from `scratch` named `release` (i.e., `FROM scratch AS release`), and copy any needed directories from the `app-build` image
+
+```dockerfile
+FROM ... AS build
+# Do base installs for dev and app-build here
+FROM build AS dev
+# Local dev installs only
+FROM build AS app-build
+# All installs for the release image
+# Any tweaks needed for the release image
+FROM scratch AS release
+# Copy over the files from app-build
+# COPY --from=app-build /app-build/paths/to/files /release/paths/to/files
+```
+
+By following this method, your deployment image will contain only the minimum required directories and files, which shrinks the overall image size and reduces findings.
+
+## How to use the workflow
+
+The workflow runs whenever there is a push to a PR, or on a merge to `main`, if there are changes in the `app` directory. It scans in both cases to ensure no issues slip through if, for example, a PR is approved on a Friday but isn't merged until Monday: a CVE could have been found in the time between the last run and the merge.
+
+## Notes about Scanners
+
+### Hadolint
+
+The hadolint scanner allows you to ignore or safelist certain findings, which can be specified in the [.hadolint.yaml](../../.hadolint.yaml) file. There is a template file here that you can use in your repo.
+
+### Trivy
+
+The trivy scanner allows you to ignore or safelist certain findings, which can be specified in the [.trivyignore](../../.trivyignore) file. There is a template file here that you can use in your repo.
+
+### Anchore
+
+The anchore scanner allows you to ignore or safelist certain findings, which can be specified in the [.grype.yml](../../.grype.yml) file. There is a template file here that you can use in your repo. There are flags set to ignore findings that are in the state `not-fixed`, `wont-fix`, and `unknown`.
+
+### Dockle
+
+The dockle scanner action does not support an ignore or safelist file, but it can accept a list of allowed files via an allow file specified in the `DOCKLE_ACCEPT_FILES` environment variable. To work around this, before the dockle scan runs, a prior step checks for a file named [.dockleconfig](../../.dockleconfig) and pipes its contents into that environment variable if it exists. Note that unlike the other scanners' ignore files, this does not suppress finding types; it only excludes the specific files listed.
diff --git a/docs/releases.md b/docs/releases.md
new file mode 100644
index 00000000..68a05e62
--- /dev/null
+++ b/docs/releases.md
+# Release Management
+
+## Building a release
+
+To build a release, run
+
+```bash
+make release-build
+```
+
+This builds the release from [app/Dockerfile](../app/Dockerfile). The Dockerfile
+needs to have a build stage called `release` to act as the build target.
+(See [Name your build stages](https://docs.docker.com/build/building/multi-stage/#name-your-build-stages))
+
+## Publishing a release
+
+TODO
+
+## Deploying a release
+
+TODO
diff --git a/docs/system-architecture.md b/docs/system-architecture.md
new file mode 100644
index 00000000..ba22b5f5
--- /dev/null
+++ b/docs/system-architecture.md
+# System Architecture
+
+This diagram shows the system architecture. 
[🔒 Make a copy of this Lucid template for your own application](https://lucid.app/lucidchart/8851888e-1292-4228-8fef-60a61c6b57e7/edit).
+
+![System architecture](https://lucid.app/publicSegments/view/e5a36152-200d-4d95-888e-4cdbdab80d1b/image.png)
+
+* **Access Logs** — Amazon S3 bucket storing the application service's access logs.
+* **Alarms SNS Topic** — SNS topic that notifies the incident management service when an alarm triggers.
+* **Application Load Balancer** — Amazon application load balancer.
+* **Aurora PostgreSQL Database** — Amazon Aurora Serverless PostgreSQL database used by the application.
+* **Build Repository ECR Registry** — Amazon ECR registry that acts as the build repository of application container images.
+* **CloudWatch Alarms** — Amazon CloudWatch Alarms that trigger on errors and latency.
+* **CloudWatch Evidently Feature Flags** — Amazon CloudWatch Evidently service that manages feature flags used by the application to manage feature launches.
+* **Database Role Manager** — AWS Lambda serverless function that provisions the database roles needed by the application.
+* **GitHub** — Source code repository. Also responsible for Continuous Integration (CI) and Continuous Delivery (CD) workflows. GitHub Actions builds and deploys releases to an Amazon ECR registry that stores Docker container images for the application service.
+* **Incident Management Service** — Incident management service (e.g. PagerDuty or Splunk On-Call) for managing on-call schedules and paging engineers for urgent production issues.
+* **Service** — Amazon ECS service running the application.
+* **Terraform Backend Bucket** — Amazon S3 bucket used to store terraform state files.
+* **Terraform Locks DynamoDB Table** — Amazon DynamoDB table used to manage concurrent access to terraform state files.
+* **VPC Endpoints** — VPC endpoints are used by the Database Role Manager to access Amazon Services without traffic leaving the VPC.
+* **VPC Network** — Amazon VPC network.
diff --git a/infra/.gitignore b/infra/.gitignore
new file mode 100644
index 00000000..2cafcb4d
--- /dev/null
+++ b/infra/.gitignore
+# Local .terraform metadata
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+
+# Crash log files
+crash.log
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+#
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+*.plan
+*.tfstate
+
+# Ignore archives used for deploying lambdas
+*.zip
diff --git a/infra/README.md b/infra/README.md
new file mode 100644
index 00000000..d91abf43
--- /dev/null
+++ b/infra/README.md
+# Overview
+
+This project practices infrastructure-as-code and uses the [Terraform framework](https://www.terraform.io). This directory contains the infrastructure code for this project, including infrastructure for all application resources. This terraform project uses the [AWS provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs). It is based on the [Nava platform infrastructure template](https://github.com/navapbc/template-infra).
+
+## 📂 Directory structure
+
+The structure for the infrastructure code looks like this:
+
+```text
+infra/                  Infrastructure code
+  accounts/             [Root module] IaC and IAM resources
+  [app_name]/           Application directory: infrastructure for the main application
+  modules/              Reusable child modules
+  networks/             [Root module] Account level network config (shared across all apps, environments, and terraform workspaces)
+```
+
+Each application directory contains the following:
+
+```text
+  app-config/           Application-level configuration for the application resources (different config for different environments)
+  build-repository/     [Root module] Docker image repository for the application (shared across environments and terraform workspaces)
+  database/             [Root module] Configuration for database (different config for different environments)
+  service/              [Root module] Configuration for containers, such as load balancer, application service (different config for different environments)
+```
+
+Details about terraform root modules and child modules are documented in [module-architecture](/docs/infra/module-architecture.md).
+
+## 🏗️ Project architecture
+
+### 🧅 Infrastructure layers
+
+The infrastructure template is designed to operate on different layers:
+
+- Account layer
+- Network layer
+- Build repository layer (per application)
+- Database layer (per application)
+- Service layer (per application)
+
+### 🏜️ Application environments
+
+This project has the following AWS environments:
+
+- `dev`
+- `staging`
+- `prod`
+
+The environments share the same root modules but will have different configurations. Backend configuration is saved as [`.tfbackend`](https://developer.hashicorp.com/terraform/language/settings/backends/configuration#file) files. Most `.tfbackend` files are named after the environment. For example, the `[app_name]/service` infrastructure resources for the `dev` environment are configured via `dev.s3.tfbackend`. Resources for a module that are shared across environments, such as the build-repository, use `shared.s3.tfbackend`. Resources that are shared across the entire account (e.g. /infra/accounts) use `<ACCOUNT_NAME>.<ACCOUNT_ID>.s3.tfbackend`.
+
+### 🔀 Project workflow
+
+This project relies on Make targets in the [root Makefile](/Makefile), which in turn call shell scripts in [./bin](/bin). The shell scripts call `terraform` commands. Many of the shell scripts are also called by the [GitHub Actions CI/CD](/.github/workflows).
+
+Generally, you should use the Make targets or the underlying bin scripts, but you can call the underlying terraform commands if needed. See [making-infra-changes](/docs/infra/making-infra-changes.md) for more details.
+
+## 💻 Development
+
+### 1️⃣ First time initialization
+
+To set up this project for the first time (i.e., it has never been deployed to the target AWS account):
+
+1. [Install this template](/README.md#installation) into an application that meets the [Application Requirements](/README.md#application-requirements)
+2. [Configure the project](/infra/project-config/main.tf) (These values will be used in subsequent infra setup steps to namespace resources and add infrastructure tags.)
+3. [Set up infrastructure developer tools](/docs/infra/set-up-infrastructure-tools.md)
+4. [Set up AWS account](/docs/infra/set-up-aws-account.md)
+5. [Set up the virtual network (VPC)](/docs/infra/set-up-network.md)
+6. For each application:
+   1. [Set up application build repository](/docs/infra/set-up-app-build-repository.md)
+   2. 
[Set up application database](/docs/infra/set-up-database.md)
+   3. [Set up application environment](/docs/infra/set-up-app-env.md)
+   4. [Configure environment variables and secrets](/docs/infra/environment-variables-and-secrets.md)
+   5. [Set up background jobs](/docs/infra/background-jobs.md)
+
+### 🆕 New developer
+
+To get set up as a new developer on a project that has already been deployed to the target AWS account:
+
+1. [Set up infrastructure developer tools](/docs/infra/set-up-infrastructure-tools.md)
+2. [Review how to make changes to infrastructure](/docs/infra/making-infra-changes.md)
+3. [Review the infrastructure style guide](/docs/infra/style-guide.md)
+4. (Optional) Set up a [terraform workspace](/docs/infra/intro-to-terraform-workspaces.md)
+
+## 📇 Additional reading
+
+Additional documentation can be found in the [documentation directory](/docs/infra).
diff --git a/infra/accounts/main.tf b/infra/accounts/main.tf
new file mode 100644
index 00000000..94f00ba4
--- /dev/null
+++ b/infra/accounts/main.tf
+data "aws_caller_identity" "current" {}
+data "aws_region" "current" {}
+
+locals {
+  # This must match the name of the bucket created while bootstrapping the account in set-up-current-account
+  tf_state_bucket_name = "${module.project_config.project_name}-${data.aws_caller_identity.current.account_id}-${data.aws_region.current.name}-tf"
+
+  # Choose the region where this infrastructure should be deployed.
+  region = module.project_config.default_region
+
+  # Set project tags that will be used to tag all resources.
+  tags = merge(module.project_config.default_tags, {
+    description = "Backend resources required for terraform state management and GitHub authentication with AWS."
+  })
+}
+
+terraform {
+
+  required_version = "~>1.8.0"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~>5.6.0"
+    }
+  }
+
+  backend "s3" {
+    encrypt = "true"
+  }
+}
+
+provider "aws" {
+  region = local.region
+  default_tags {
+    tags = local.tags
+  }
+}
+
+module "project_config" {
+  source = "../project-config"
+}
+
+module "backend" {
+  source = "../modules/terraform-backend-s3"
+  name   = local.tf_state_bucket_name
+}
+
+module "auth_github_actions" {
+  source                   = "../modules/auth-github-actions"
+  github_actions_role_name = module.project_config.github_actions_role_name
+  github_repository        = module.project_config.code_repository
+  allowed_actions          = [for aws_service in module.project_config.aws_services : "${aws_service}:*"]
+}
diff --git a/infra/accounts/outputs.tf b/infra/accounts/outputs.tf
new file mode 100644
index 00000000..2ec8af03
--- /dev/null
+++ b/infra/accounts/outputs.tf
+output "project_name" {
+  value = module.project_config.project_name
+}
+
+output "account_id" {
+  value = data.aws_caller_identity.current.account_id
+}
+
+output "region" {
+  value = data.aws_region.current.name
+}
+
+output "tf_state_bucket_name" {
+  value = module.backend.tf_state_bucket_name
+}
+
+output "tf_log_bucket_name" {
+  value = module.backend.tf_log_bucket_name
+}
+
+output "tf_locks_table_name" {
+  value = module.backend.tf_locks_table_name
+}
diff --git a/infra/app/app-config/dev.tf b/infra/app/app-config/dev.tf
new file mode 100644
index 00000000..69d5d549
--- /dev/null
+++ b/infra/app/app-config/dev.tf
+module "dev_config" {
+  source         = "./env-config"
+  project_name   = local.project_name
+  app_name       = local.app_name
+  default_region = module.project_config.default_region
+  environment    = "dev"
+  account_name   = "dev"
+  
network_name = "dev" + domain_name = "decision-support-tool-dev.navateam.com" + enable_https = true + has_database = true + has_incident_management_service = local.has_incident_management_service + service_cpu = 1024 + service_memory = 4096 + + # Enables ECS Exec access for debugging or jump access. + # See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html + # Defaults to `false`. Uncomment the next line to enable. + enable_command_execution = true +} diff --git a/infra/app/app-config/env-config/database.tf b/infra/app/app-config/env-config/database.tf new file mode 100644 index 00000000..4ee652a8 --- /dev/null +++ b/infra/app/app-config/env-config/database.tf @@ -0,0 +1,17 @@ +locals { + database_config = var.has_database ? { + region = var.default_region + cluster_name = "${var.app_name}-${var.environment}" + app_username = "app" + migrator_username = "migrator" + schema_name = var.app_name + app_access_policy_name = "${var.app_name}-${var.environment}-app-access" + migrator_access_policy_name = "${var.app_name}-${var.environment}-migrator-access" + + # Enable extensions that require the rds_superuser role to be created here + # See docs/infra/set-up-database.md for more information + superuser_extensions = { + "vector" : true + } + } : null +} diff --git a/infra/app/app-config/env-config/environment-variables.tf b/infra/app/app-config/env-config/environment-variables.tf new file mode 100644 index 00000000..9a9f1d0e --- /dev/null +++ b/infra/app/app-config/env-config/environment-variables.tf @@ -0,0 +1,43 @@ +locals { + # Map from environment variable name to environment variable value + # This is a map rather than a list so that variables can be easily + # overridden per environment using terraform's `merge` function + default_extra_environment_variables = { + # Example environment variables + # WORKER_THREADS_COUNT = 4 + # LOG_LEVEL = "info" + # DB_CONNECTION_POOL_SIZE = 5 + } + + # Configuration for secrets + # List of configurations for defining environment variables that pull from SSM parameter + # store. Configurations are of the format + # { + # ENV_VAR_NAME = { + # manage_method = "generated" # or "manual" for a secret that was created and stored in SSM manually + # secret_store_name = "/ssm/param/name" + # } + # } + secrets = { + # Example generated secret + # RANDOM_SECRET = { + # manage_method = "generated" + # secret_store_name = "/${var.app_name}-${var.environment}/random-secret" + # } + + OPENAI_API_KEY = { + manage_method = "manual" + secret_store_name = "/${var.app_name}-${var.environment}/OPENAI_API_KEY" + } + LITERAL_API_KEY = { + manage_method = "manual" + secret_store_name = "/${var.app_name}-${var.environment}/LITERAL_API_KEY" + } + + # Example secret that references a manually created secret + # SECRET_SAUCE = { + # manage_method = "manual" + # secret_store_name = "/${var.app_name}-${var.environment}/secret-sauce" + # } + } +} diff --git a/infra/app/app-config/env-config/file_upload_jobs.tf b/infra/app/app-config/env-config/file_upload_jobs.tf new file mode 100644 index 00000000..ef61a89f --- /dev/null +++ b/infra/app/app-config/env-config/file_upload_jobs.tf @@ -0,0 +1,14 @@ +locals { + # Configuration for default jobs to run in every environment. + # See description of `file_upload_jobs` variable in the service module (infra/modules/service/variables.tf) + # for the structure of this configuration object. + # One difference is that `source_bucket` is optional here. 
If `source_bucket` is not
+  # specified, then the source bucket will be set to the storage bucket's name.
+  file_upload_jobs = {
+    # Example job configuration
+    # etl = {
+    #   path_prefix  = "etl/input",
+    #   task_command = ["python", "-m", "flask", "--app", "app.py", "etl", ""]
+    # }
+  }
+}
diff --git a/infra/app/app-config/env-config/main.tf b/infra/app/app-config/env-config/main.tf
new file mode 100644
index 00000000..3eebe7a2
--- /dev/null
+++ b/infra/app/app-config/env-config/main.tf
+locals {
+  # The prefix key/value pair is used for Terraform Workspaces, which is useful for projects with multiple infrastructure developers.
+  # By default, Terraform creates a workspace named “default.” When running in the default workspace this prefix will be empty;
+  # in any other workspace it will be the workspace name followed by “-”. If you choose not to use workspaces, set this value to "dev".
+  prefix = terraform.workspace == "default" ? "" : "${terraform.workspace}-"
+
+  bucket_name = "${local.prefix}${var.project_name}-${var.app_name}-${var.environment}"
+}
diff --git a/infra/app/app-config/env-config/outputs.tf b/infra/app/app-config/env-config/outputs.tf
new file mode 100644
index 00000000..5e10aae7
--- /dev/null
+++ b/infra/app/app-config/env-config/outputs.tf
+output "account_name" {
+  value       = var.account_name
+  description = "Name of the AWS account that contains the resources for the application environment."
+}
+
+output "database_config" {
+  value = local.database_config
+}
+
+output "network_name" {
+  value = var.network_name
+}
+
+output "service_config" {
+  value = {
+    service_name             = "${local.prefix}${var.app_name}-${var.environment}"
+    domain_name              = var.domain_name
+    enable_https             = var.enable_https
+    region                   = var.default_region
+    cpu                      = var.service_cpu
+    memory                   = var.service_memory
+    desired_instance_count   = var.service_desired_instance_count
+    enable_command_execution = var.enable_command_execution
+
+    extra_environment_variables = merge(
+      local.default_extra_environment_variables,
+      var.service_override_extra_environment_variables
+    )
+
+    secrets = local.secrets
+
+    file_upload_jobs = {
+      for job_name, job_config in local.file_upload_jobs :
+      # For job configs that don't define a source_bucket, add the source_bucket config property
+      job_name => merge({ source_bucket = local.bucket_name }, job_config)
+    }
+  }
+}
+
+output "storage_config" {
+  value = {
+    # Include project name in bucket name since buckets need to be globally unique across AWS
+    bucket_name = local.bucket_name
+  }
+}
+
+output "incident_management_service_integration" {
+  value = var.has_incident_management_service ? {
+    integration_url_param_name = "/monitoring/${var.app_name}/${var.environment}/incident-management-integration-url"
+  } : null
+}
diff --git a/infra/app/app-config/env-config/variables.tf b/infra/app/app-config/env-config/variables.tf
new file mode 100644
index 00000000..60356f90
--- /dev/null
+++ b/infra/app/app-config/env-config/variables.tf
+variable "project_name" {
+  type = string
+}
+
+variable "app_name" {
+  type = string
+}
+
+variable "environment" {
+  description = "name of the application environment (e.g. dev, staging, prod)"
+  type        = string
+}
+
+variable "account_name" {
+  description = <<EOT
+    Name of the AWS account that contains the resources for the application environment.
+    The account is identified by its backend config file in /infra/accounts, which follows
+    the naming convention <ACCOUNT_NAME>.<ACCOUNT_ID>.s3.tfbackend.
+    Provide the ACCOUNT_NAME for this variable.
+ EOT + type = string +} + +variable "network_name" { + description = "Human readable identifier of the network / VPC" + type = string +} + +variable "default_region" { + description = "default region for the project" + type = string +} + +variable "domain_name" { + type = string + description = "The fully qualified domain name for the application" + default = null +} + +variable "enable_https" { + type = bool + description = "Whether to enable HTTPS for the application" + default = false +} + +variable "certificate_arn" { + type = string + description = "The ARN of the certificate to use for the application" + default = null +} + +variable "has_database" { + type = bool +} + +variable "has_incident_management_service" { + type = bool +} + +variable "service_cpu" { + type = number + default = 256 +} + +variable "service_memory" { + type = number + default = 512 +} + +variable "service_desired_instance_count" { + type = number + default = 1 +} + +variable "service_override_extra_environment_variables" { + type = map(string) + description = <..s3.tfbackend + shared_account_name = "dev" +} + +module "project_config" { + source = "../../project-config" +} diff --git a/infra/app/app-config/outputs.tf b/infra/app/app-config/outputs.tf new file mode 100644 index 00000000..ac79190d --- /dev/null +++ b/infra/app/app-config/outputs.tf @@ -0,0 +1,49 @@ +output "app_name" { + value = local.app_name +} + +output "account_names_by_environment" { + value = merge( + { + for environment, environment_config in local.environment_configs : + environment => environment_config.account_name + }, + { shared = local.shared_account_name }, + ) +} + +output "environments" { + value = local.environments +} + +output "feature_flags" { + value = local.feature_flags +} + +output "has_database" { + value = local.has_database +} + +output "has_external_non_aws_service" { + value = local.has_external_non_aws_service +} + +output "has_incident_management_service" { + value = local.has_incident_management_service +} + +output "image_repository_name" { + value = local.image_repository_name +} + +output "build_repository_config" { + value = local.build_repository_config +} + +output "environment_configs" { + value = local.environment_configs +} + +output "shared_account_name" { + value = local.shared_account_name +} diff --git a/infra/app/app-config/prod.tf b/infra/app/app-config/prod.tf new file mode 100644 index 00000000..386eb080 --- /dev/null +++ b/infra/app/app-config/prod.tf @@ -0,0 +1,25 @@ +module "prod_config" { + source = "./env-config" + project_name = local.project_name + app_name = local.app_name + default_region = module.project_config.default_region + environment = "prod" + account_name = "prod" + network_name = "prod" + domain_name = null + enable_https = false + has_database = local.has_database + has_incident_management_service = local.has_incident_management_service + + # These numbers are a starting point based on this article + # Update the desired instance size and counts based on the project's specific needs + # https://conchchow.medium.com/aws-ecs-fargate-compute-capacity-planning-a5025cb40bd0 + service_cpu = 1024 + service_memory = 4096 + service_desired_instance_count = 3 + + # Enables ECS Exec access for debugging or jump access. + # Defaults to `false`. Uncomment the next line to enable. + # โš ๏ธ Warning! It is not recommended to enable this in a production environment. 
+ # enable_command_execution = true +} diff --git a/infra/app/app-config/staging.tf b/infra/app/app-config/staging.tf new file mode 100644 index 00000000..ec46f3f8 --- /dev/null +++ b/infra/app/app-config/staging.tf @@ -0,0 +1,18 @@ +module "staging_config" { + source = "./env-config" + project_name = local.project_name + app_name = local.app_name + default_region = module.project_config.default_region + environment = "staging" + account_name = "staging" + network_name = "staging" + domain_name = null + enable_https = false + has_database = local.has_database + has_incident_management_service = local.has_incident_management_service + + # Enables ECS Exec access for debugging or jump access. + # See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html + # Defaults to `false`. Uncomment the next line to enable. + # enable_command_execution = true +} diff --git a/infra/app/build-repository/main.tf b/infra/app/build-repository/main.tf new file mode 100644 index 00000000..fd92bc38 --- /dev/null +++ b/infra/app/build-repository/main.tf @@ -0,0 +1,59 @@ +data "aws_iam_role" "github_actions" { + name = module.project_config.github_actions_role_name +} + +locals { + # Set project tags that will be used to tag all resources. + tags = merge(module.project_config.default_tags, { + application = module.app_config.app_name + application_role = "build-repository" + description = "Backend resources required for storing built release candidate artifacts to be used for deploying to environments." + }) + + # Get list of AWS account ids for the application environments that + # will need access to the build repository + app_account_names = values(module.app_config.account_names_by_environment) + account_ids_by_name = data.external.account_ids_by_name.result + app_account_ids = [for account_name in local.app_account_names : local.account_ids_by_name[account_name] if contains(keys(local.account_ids_by_name), account_name)] +} + +terraform { + required_version = "~>1.8.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>4.20.1" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = module.app_config.build_repository_config.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "external" "account_ids_by_name" { + program = ["../../../bin/account-ids-by-name"] +} + +module "container_image_repository" { + source = "../../modules/container-image-repository" + name = module.app_config.image_repository_name + push_access_role_arn = data.aws_iam_role.github_actions.arn + app_account_ids = local.app_account_ids +} diff --git a/infra/app/database/main.tf b/infra/app/database/main.tf new file mode 100644 index 00000000..faf4e917 --- /dev/null +++ b/infra/app/database/main.tf @@ -0,0 +1,92 @@ +data "aws_vpc" "network" { + tags = { + project = module.project_config.project_name + network_name = local.environment_config.network_name + } +} + +data "aws_subnets" "database" { + tags = { + project = module.project_config.project_name + network_name = local.environment_config.network_name + subnet_type = "database" + } +} + +locals { + # The prefix key/value pair is used for Terraform Workspaces, which is useful for projects with multiple infrastructure developers. 
+ # By default, Terraform creates a workspace named โ€œdefault.โ€ If a non-default workspace is not created this prefix will equal โ€œdefaultโ€, + # if you choose not to use workspaces set this value to "dev" + prefix = terraform.workspace == "default" ? "" : "${terraform.workspace}-" + + # Add environment specific tags + tags = merge(module.project_config.default_tags, { + environment = var.environment_name + description = "Database resources for the ${var.environment_name} environment" + }) + + environment_config = module.app_config.environment_configs[var.environment_name] + database_config = local.environment_config.database_config + network_config = module.project_config.network_configs[local.environment_config.network_name] +} + +terraform { + required_version = "~>1.8.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>4.67.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = local.database_config.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "aws_security_groups" "aws_services" { + filter { + name = "group-name" + values = ["${module.project_config.aws_services_security_group_name_prefix}*"] + } + + filter { + name = "vpc-id" + values = [data.aws_vpc.network.id] + } +} + +module "database" { + source = "../../modules/database" + + name = "${local.prefix}${local.database_config.cluster_name}" + app_access_policy_name = "${local.prefix}${local.database_config.app_access_policy_name}" + migrator_access_policy_name = "${local.prefix}${local.database_config.migrator_access_policy_name}" + + # The following are not AWS infra resources and therefore do not need to be + # isolated via the terraform workspace prefix + app_username = local.database_config.app_username + migrator_username = local.database_config.migrator_username + schema_name = local.database_config.schema_name + + vpc_id = data.aws_vpc.network.id + database_subnet_group_name = local.network_config.database_subnet_group_name + private_subnet_ids = data.aws_subnets.database.ids + aws_services_security_group_id = data.aws_security_groups.aws_services.ids[0] +} diff --git a/infra/app/database/outputs.tf b/infra/app/database/outputs.tf new file mode 100644 index 00000000..927b820a --- /dev/null +++ b/infra/app/database/outputs.tf @@ -0,0 +1,3 @@ +output "role_manager_function_name" { + value = module.database.role_manager_function_name +} diff --git a/infra/app/database/variables.tf b/infra/app/database/variables.tf new file mode 100644 index 00000000..c142bdf9 --- /dev/null +++ b/infra/app/database/variables.tf @@ -0,0 +1,4 @@ +variable "environment_name" { + type = string + description = "name of the application environment" +} diff --git a/infra/app/service/image_tag.tf b/infra/app/service/image_tag.tf new file mode 100644 index 00000000..ce1bc75c --- /dev/null +++ b/infra/app/service/image_tag.tf @@ -0,0 +1,56 @@ +# Make the "image_tag" variable optional so that "terraform plan" +# and "terraform apply" work without any required variables. +# +# This works as follows: + +# 1. Accept an optional variable during a terraform plan/apply. (see "image_tag" variable in variables.tf) + +# 2. Read the output used from the last terraform state using "terraform_remote_state". 
+# Get the backend config by parsing the backend config file +locals { + backend_config_file_path = "${path.module}/${var.environment_name}.s3.tfbackend" + backend_config_file = file("${path.module}/${var.environment_name}.s3.tfbackend") + + # Use regex to parse backend config file to get a map of variables to their + # defined values since there is no built-in terraform function that does that + # + # The backend config file consists of lines that look like + # = " match[1] } + tfstate_bucket = local.backend_config["bucket"] + tfstate_key = local.backend_config["key"] +} +data "terraform_remote_state" "current_image_tag" { + # Don't do a lookup if image_tag is provided explicitly. + # This saves some time and also allows us to do a first deploy, + # where the tfstate file does not yet exist. + count = var.image_tag == null ? 1 : 0 + backend = "s3" + + config = { + bucket = local.tfstate_bucket + key = local.tfstate_key + region = local.service_config.region + } + + defaults = { + image_tag = null + } +} + +# 3. Prefer the given variable if provided, otherwise default to the value from last time. +locals { + image_tag = (var.image_tag == null + ? data.terraform_remote_state.current_image_tag[0].outputs.image_tag + : var.image_tag) +} + +# 4. Store the final value used as a terraform output for next time. +output "image_tag" { + value = local.image_tag +} diff --git a/infra/app/service/main.tf b/infra/app/service/main.tf new file mode 100644 index 00000000..f06cc2d0 --- /dev/null +++ b/infra/app/service/main.tf @@ -0,0 +1,193 @@ +data "aws_vpc" "network" { + tags = { + project = module.project_config.project_name + network_name = local.environment_config.network_name + } +} + +data "aws_subnets" "public" { + tags = { + project = module.project_config.project_name + network_name = local.environment_config.network_name + subnet_type = "public" + } +} + +data "aws_subnets" "private" { + tags = { + project = module.project_config.project_name + network_name = local.environment_config.network_name + subnet_type = "private" + } +} + +locals { + # Add environment specific tags + tags = merge(module.project_config.default_tags, { + environment = var.environment_name + description = "Application resources created in ${var.environment_name} environment" + }) + + is_temporary = startswith(terraform.workspace, "t-") + + environment_config = module.app_config.environment_configs[var.environment_name] + service_config = local.environment_config.service_config + database_config = local.environment_config.database_config + storage_config = local.environment_config.storage_config + incident_management_service_integration_config = local.environment_config.incident_management_service_integration + + network_config = module.project_config.network_configs[local.environment_config.network_name] +} + +terraform { + required_version = "~>1.8.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.56.0, < 5.0.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = local.service_config.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "aws_rds_cluster" "db_cluster" { + count = module.app_config.has_database ? 1 : 0 + cluster_identifier = local.database_config.cluster_name +} + +data "aws_iam_policy" "app_db_access_policy" { + count = module.app_config.has_database ? 
1 : 0 + name = local.database_config.app_access_policy_name +} + +data "aws_iam_policy" "migrator_db_access_policy" { + count = module.app_config.has_database ? 1 : 0 + name = local.database_config.migrator_access_policy_name +} + +# Retrieve url for external incident management tool (e.g. Pagerduty, Splunk-On-Call) + +data "aws_ssm_parameter" "incident_management_service_integration_url" { + count = module.app_config.has_incident_management_service ? 1 : 0 + name = local.incident_management_service_integration_config.integration_url_param_name +} + +data "aws_security_groups" "aws_services" { + filter { + name = "group-name" + values = ["${module.project_config.aws_services_security_group_name_prefix}*"] + } + + filter { + name = "vpc-id" + values = [data.aws_vpc.network.id] + } +} + +data "aws_acm_certificate" "certificate" { + count = local.service_config.enable_https ? 1 : 0 + domain = local.service_config.domain_name +} + +data "aws_route53_zone" "zone" { + count = local.service_config.domain_name != null ? 1 : 0 + name = local.network_config.domain_config.hosted_zone +} + +module "service" { + source = "../../modules/service" + service_name = local.service_config.service_name + + image_repository_name = module.app_config.image_repository_name + image_tag = local.image_tag + + vpc_id = data.aws_vpc.network.id + public_subnet_ids = data.aws_subnets.public.ids + private_subnet_ids = data.aws_subnets.private.ids + + domain_name = local.service_config.domain_name + hosted_zone_id = local.service_config.domain_name != null ? data.aws_route53_zone.zone[0].zone_id : null + certificate_arn = local.service_config.enable_https ? data.aws_acm_certificate.certificate[0].arn : null + + cpu = local.service_config.cpu + memory = local.service_config.memory + desired_instance_count = local.service_config.desired_instance_count + enable_command_execution = local.service_config.enable_command_execution + + aws_services_security_group_id = data.aws_security_groups.aws_services.ids[0] + + file_upload_jobs = local.service_config.file_upload_jobs + + db_vars = module.app_config.has_database ? 
{ + security_group_ids = data.aws_rds_cluster.db_cluster[0].vpc_security_group_ids + app_access_policy_arn = data.aws_iam_policy.app_db_access_policy[0].arn + migrator_access_policy_arn = data.aws_iam_policy.migrator_db_access_policy[0].arn + connection_info = { + host = data.aws_rds_cluster.db_cluster[0].endpoint + port = data.aws_rds_cluster.db_cluster[0].port + user = local.database_config.app_username + db_name = data.aws_rds_cluster.db_cluster[0].database_name + schema_name = local.database_config.schema_name + } + } : null + + extra_environment_variables = merge({ + FEATURE_FLAGS_PROJECT = module.feature_flags.evidently_project_name + BUCKET_NAME = local.storage_config.bucket_name + }, local.service_config.extra_environment_variables) + + secrets = [ + for secret_name in keys(local.service_config.secrets) : { + name = secret_name + valueFrom = module.secrets[secret_name].secret_arn + } + ] + + extra_policies = { + feature_flags_access = module.feature_flags.access_policy_arn, + storage_access = module.storage.access_policy_arn + } + + is_temporary = local.is_temporary +} + +module "monitoring" { + source = "../../modules/monitoring" + #Email subscription list: + #email_alerts_subscription_list = ["email1@email.com", "email2@email.com"] + + # Module takes service and ALB names to link all alerts with corresponding targets + service_name = local.service_config.service_name + load_balancer_arn_suffix = module.service.load_balancer_arn_suffix + incident_management_service_integration_url = module.app_config.has_incident_management_service && !local.is_temporary ? data.aws_ssm_parameter.incident_management_service_integration_url[0].value : null +} + +module "feature_flags" { + source = "../../modules/feature-flags" + service_name = local.service_config.service_name + feature_flags = module.app_config.feature_flags +} + +module "storage" { + source = "../../modules/storage" + name = local.storage_config.bucket_name +} diff --git a/infra/app/service/outputs.tf b/infra/app/service/outputs.tf new file mode 100644 index 00000000..58711aaf --- /dev/null +++ b/infra/app/service/outputs.tf @@ -0,0 +1,24 @@ +output "service_endpoint" { + description = "The public endpoint for the service." + value = module.service.public_endpoint +} + +output "service_cluster_name" { + value = module.service.cluster_name +} + +output "service_name" { + value = local.service_config.service_name +} + +output "application_log_group" { + value = module.service.application_log_group +} + +output "application_log_stream_prefix" { + value = module.service.application_log_stream_prefix +} + +output "migrator_role_arn" { + value = module.service.migrator_role_arn +} diff --git a/infra/app/service/secrets.tf b/infra/app/service/secrets.tf new file mode 100644 index 00000000..e65eaa0c --- /dev/null +++ b/infra/app/service/secrets.tf @@ -0,0 +1,16 @@ +module "secrets" { + for_each = local.service_config.secrets + + source = "../../modules/secret" + + # When generating secrets and storing them in parameter store, append the + # terraform workspace to the secret store path if the environment is temporary + # to avoid conflicts with existing environments. + # Don't do this for secrets that are managed manually since the temporary + # environments will need to share those secrets. + secret_store_name = (each.value.manage_method == "generated" && local.is_temporary ? 
+ "${each.value.secret_store_name}/${terraform.workspace}" : + each.value.secret_store_name + ) + manage_method = each.value.manage_method +} diff --git a/infra/app/service/variables.tf b/infra/app/service/variables.tf new file mode 100644 index 00000000..19a5f312 --- /dev/null +++ b/infra/app/service/variables.tf @@ -0,0 +1,10 @@ +variable "environment_name" { + type = string + description = "name of the application environment" +} + +variable "image_tag" { + type = string + description = "image tag to deploy to the environment" + default = null +} diff --git a/infra/example.s3.tfbackend b/infra/example.s3.tfbackend new file mode 100644 index 00000000..c71af253 --- /dev/null +++ b/infra/example.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "" +key = "" +dynamodb_table = "" +region = "" diff --git a/infra/modules/auth-github-actions/README.md b/infra/modules/auth-github-actions/README.md new file mode 100644 index 00000000..b516b053 --- /dev/null +++ b/infra/modules/auth-github-actions/README.md @@ -0,0 +1,16 @@ +# AWS Federation for GitHub Actions + +This module sets up a way for GitHub Actions to access AWS resources using short-lived credentials without requiring long-lived access keys and without requiring separate AWS identities that need to be managed. It does that by doing the following: + +1. Set up GitHub as an OpenID Connect Provider in the AWS account +2. Create an IAM role that GitHub actions will assume +3. Attach an IAM policy to the GitHub actions role that provides the necessary access to AWS account resources. By default this module will provide the [AWS managed Developer power user access policy `PowerUserAccess`](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html) + +## Related Implementations + +Similar functionality is also implemented in the [oidc-github module in the Terraform Registry](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) (see also [Nava's fork of that repo](https://github.com/navapbc/terraform-aws-oidc-github)), but since IAM is sensitive we chose to implement it ourselves to keep the module simple, easy to understand, and in a place that's within our scope of control. + +## Reference + +* [AWS - Creating OpenID Connect (OIDC) Providers](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html) +* [GitHub - Security Hardening with OpenID Connect](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect) diff --git a/infra/modules/auth-github-actions/main.tf b/infra/modules/auth-github-actions/main.tf new file mode 100644 index 00000000..d0650b88 --- /dev/null +++ b/infra/modules/auth-github-actions/main.tf @@ -0,0 +1,59 @@ +# Set up GitHub's OpenID Connect provider in AWS account +data "aws_iam_openid_connect_provider" "github" { + url = "https://token.actions.githubusercontent.com" +} + +# Create IAM role for GitHub Actions +resource "aws_iam_role" "github_actions" { + name = var.github_actions_role_name + description = "Service role required for Github Action to deploy application resources into the account." 
+ assume_role_policy = data.aws_iam_policy_document.github_assume_role.json +} + +# Attach access policies to GitHub Actions role +resource "aws_iam_role_policy_attachment" "github_actions" { + role = aws_iam_role.github_actions.name + policy_arn = aws_iam_policy.github_actions.arn +} + +resource "aws_iam_policy" "github_actions" { + name = "${var.github_actions_role_name}-manage-infra" + description = "Allow ${var.github_actions_role_name} to manage AWS infrastructure resources" + policy = data.aws_iam_policy_document.github_actions.json +} + +data "aws_iam_policy_document" "github_actions" { + statement { + sid = "ManageInfra" + effect = "Allow" + actions = var.allowed_actions + resources = ["*"] + } +} + +# Set up assume role policy for GitHub Actions to allow GitHub actions +# running from the specified repository and branches/git refs to assume +# the role +data "aws_iam_policy_document" "github_assume_role" { + statement { + effect = "Allow" + actions = ["sts:AssumeRoleWithWebIdentity"] + + principals { + type = "Federated" + identifiers = [data.aws_iam_openid_connect_provider.github.arn] + } + + condition { + test = "StringEquals" + variable = "token.actions.githubusercontent.com:aud" + values = ["sts.amazonaws.com"] + } + + condition { + test = "StringLike" + variable = "token.actions.githubusercontent.com:sub" + values = ["repo:${var.github_repository}:*"] + } + } +} diff --git a/infra/modules/auth-github-actions/variables.tf b/infra/modules/auth-github-actions/variables.tf new file mode 100644 index 00000000..dae3d0a4 --- /dev/null +++ b/infra/modules/auth-github-actions/variables.tf @@ -0,0 +1,14 @@ +variable "github_actions_role_name" { + type = string + description = "The name to use for the IAM role GitHub actions will assume." +} + +variable "github_repository" { + type = string + description = "The GitHub repository in 'org/repo' format to provide access to AWS account resources. Example: navapbc/template-infra" +} + +variable "allowed_actions" { + type = list(string) + description = "List of IAM actions to allow GitHub Actions to perform" +} diff --git a/infra/modules/container-image-repository/main.tf b/infra/modules/container-image-repository/main.tf new file mode 100644 index 00000000..cf501c82 --- /dev/null +++ b/infra/modules/container-image-repository/main.tf @@ -0,0 +1,87 @@ +data "aws_region" "current" {} + +locals { + image_registry = "${aws_ecr_repository.app.registry_id}.dkr.ecr.${data.aws_region.current.name}.amazonaws.com" +} + +resource "aws_ecr_repository" "app" { + name = var.name + image_tag_mutability = "IMMUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + encryption_configuration { + encryption_type = "KMS" + kms_key = aws_kms_key.ecr_kms.arn + } +} + +resource "aws_ecr_repository_policy" "image_access" { + repository = aws_ecr_repository.app.name + policy = data.aws_iam_policy_document.image_access.json +} + +resource "aws_ecr_lifecycle_policy" "image_retention" { + repository = aws_ecr_repository.app.name + + policy = < 0 ? 
[true] : [] + content { + sid = "PullAccess" + effect = "Allow" + principals { + type = "AWS" + identifiers = [for account_id in var.app_account_ids : "arn:aws:iam::${account_id}:root"] + } + actions = [ + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + ] + } + } +} + +resource "aws_kms_key" "ecr_kms" { + enable_key_rotation = true + description = "KMS key for ECR repository ${var.name}" +} diff --git a/infra/modules/container-image-repository/outputs.tf b/infra/modules/container-image-repository/outputs.tf new file mode 100644 index 00000000..23b18ea5 --- /dev/null +++ b/infra/modules/container-image-repository/outputs.tf @@ -0,0 +1,11 @@ +output "image_registry" { + value = local.image_registry +} + +output "image_repository_name" { + value = aws_ecr_repository.app.name +} + +output "image_repository_url" { + value = aws_ecr_repository.app.repository_url +} diff --git a/infra/modules/container-image-repository/variables.tf b/infra/modules/container-image-repository/variables.tf new file mode 100644 index 00000000..1882ca2a --- /dev/null +++ b/infra/modules/container-image-repository/variables.tf @@ -0,0 +1,15 @@ +variable "name" { + type = string + description = "The name of image repository." +} + +variable "push_access_role_arn" { + type = string + description = "The ARN of the role to grant push access to the repository. Use this to grant access to the role that builds and publishes release artifacts." +} + +variable "app_account_ids" { + type = list(string) + description = "A list of account ids to grant pull access to the repository. Use this to grant access to the application environment accounts in a multi-account setup." + default = [] +} diff --git a/infra/modules/database/.gitignore b/infra/modules/database/.gitignore new file mode 100644 index 00000000..ac3e6dba --- /dev/null +++ b/infra/modules/database/.gitignore @@ -0,0 +1 @@ +/role_manager/vendor diff --git a/infra/modules/database/authentication.tf b/infra/modules/database/authentication.tf new file mode 100644 index 00000000..55156df3 --- /dev/null +++ b/infra/modules/database/authentication.tf @@ -0,0 +1,42 @@ +# Authentication +# -------------- + +resource "aws_iam_policy" "app_db_access" { + name = var.app_access_policy_name + policy = data.aws_iam_policy_document.app_db_access.json +} + +data "aws_iam_policy_document" "app_db_access" { + # Policy to allow connection to RDS via IAM database authentication + # which is more secure than traditional username/password authentication + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + statement { + actions = [ + "rds-db:connect" + ] + + resources = [ + "${local.db_user_arn_prefix}/${var.app_username}", + ] + } +} + +resource "aws_iam_policy" "migrator_db_access" { + name = var.migrator_access_policy_name + policy = data.aws_iam_policy_document.migrator_db_access.json +} + +data "aws_iam_policy_document" "migrator_db_access" { + # Policy to allow connection to RDS via IAM database authentication + # which is more secure than traditional username/password authentication + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + statement { + actions = [ + "rds-db:connect" + ] + + resources = [ + "${local.db_user_arn_prefix}/${var.migrator_username}", + ] + } +} diff --git a/infra/modules/database/backups.tf b/infra/modules/database/backups.tf new file mode 100644 index 00000000..f9c1deb2 --- /dev/null +++ b/infra/modules/database/backups.tf @@ -0,0 +1,69 @@ +# Database Backups +# 
----------------
+
+# Backup plan that defines when and how to back up the database and which
+# backup vault to store backups in
+# See https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html
+resource "aws_backup_plan" "backup_plan" {
+  name = "${var.name}-db-backup-plan"
+
+  rule {
+    rule_name         = "${var.name}-db-backup-rule"
+    target_vault_name = aws_backup_vault.backup_vault.name
+    schedule          = "cron(0 7 ? * SUN *)" # Run Sundays at 07:00 UTC (2am EST / 3am EDT)
+  }
+}
+
+# Backup vault that stores and organizes backups
+# See https://docs.aws.amazon.com/aws-backup/latest/devguide/vaults.html
+resource "aws_backup_vault" "backup_vault" {
+  name        = "${var.name}-db-backup-vault"
+  kms_key_arn = data.aws_kms_key.backup_vault_key.arn
+
+  # Use a separate line to support automated terraform destroy commands
+  force_destroy = false
+}
+
+# KMS Key for the vault
+# This key was created by AWS by default alongside the vault
+data "aws_kms_key" "backup_vault_key" {
+  key_id = "alias/aws/backup"
+}
+
+# Backup selection defines which resources to back up
+# See https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html
+# and https://docs.aws.amazon.com/aws-backup/latest/devguide/API_BackupSelection.html
+resource "aws_backup_selection" "db_backup" {
+  name         = "${var.name}-db-backup"
+  plan_id      = aws_backup_plan.backup_plan.id
+  iam_role_arn = aws_iam_role.db_backup_role.arn
+
+  resources = [
+    aws_rds_cluster.db.arn
+  ]
+}
+
+# Role that AWS Backup uses to authenticate when backing up the target resource
+resource "aws_iam_role" "db_backup_role" {
+  name_prefix        = "${var.name}-db-backup-"
+  assume_role_policy = data.aws_iam_policy_document.db_backup_policy.json
+}
+
+data "aws_iam_policy_document" "db_backup_policy" {
+  statement {
+    actions = [
+      "sts:AssumeRole",
+    ]
+
+    effect = "Allow"
+
+    principals {
+      type        = "Service"
+      identifiers = ["backup.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "db_backup_role_policy_attachment" {
+  role       = aws_iam_role.db_backup_role.name
+  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup"
+}
diff --git a/infra/modules/database/main.tf b/infra/modules/database/main.tf
new file mode 100644
index 00000000..7b145d2a
--- /dev/null
+++ b/infra/modules/database/main.tf
@@ -0,0 +1,97 @@
+data "aws_caller_identity" "current" {}
+data "aws_region" "current" {}
+
+locals {
+  master_username       = "postgres"
+  primary_instance_name = "${var.name}-primary"
+  role_manager_name     = "${var.name}-role-manager"
+  role_manager_package  = "${path.root}/role_manager.zip"
+  # The ARNs for the users that access the database are of the format:
+  # "arn:aws:rds-db:<region>:<account-id>:dbuser:<cluster-resource-id>/<database-user-name>"
+  # See https://aws.amazon.com/blogs/database/using-iam-authentication-to-connect-with-pgadmin-amazon-aurora-postgresql-or-amazon-rds-for-postgresql/
+  db_user_arn_prefix = "arn:aws:rds-db:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:dbuser:${aws_rds_cluster.db.cluster_resource_id}"
+
+  engine_version       = "16.2"
+  engine_major_version = regex("^\\d+", local.engine_version)
+}
+
+# Database Configuration
+# ----------------------
+
+resource "aws_rds_cluster" "db" {
+  # checkov:skip=CKV2_AWS_27:have concerns about sensitive data in logs; want better way to get this information
+  # checkov:skip=CKV2_AWS_8:TODO add backup selection plan using tags
+
+  # cluster identifier is a unique identifier within the AWS account
+  # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html
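+  # For example, with var.name = "myapp-dev" (a hypothetical value used only
+  # for illustration), the RDS cluster identifier will be "myapp-dev".
+ 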
cluster_identifier = var.name + + engine = "aurora-postgresql" + engine_mode = "provisioned" + engine_version = local.engine_version + database_name = var.database_name + port = var.port + master_username = local.master_username + manage_master_user_password = true + storage_encrypted = true + kms_key_id = aws_kms_key.db.arn + allow_major_version_upgrade = false + + db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.rds_query_logging.name + + # checkov:skip=CKV_AWS_128:Auth decision needs to be ironed out + # checkov:skip=CKV_AWS_162:Auth decision needs to be ironed out + iam_database_authentication_enabled = true + copy_tags_to_snapshot = true + # final_snapshot_identifier = "${var.name}-final" + skip_final_snapshot = true + + # Use a separate line to support automated terraform destroy commands + deletion_protection = true + + serverlessv2_scaling_configuration { + max_capacity = 4.0 + min_capacity = 2.0 + } + + db_subnet_group_name = var.database_subnet_group_name + vpc_security_group_ids = [aws_security_group.db.id] + + enabled_cloudwatch_logs_exports = ["postgresql"] +} + +resource "aws_rds_cluster_instance" "primary" { + identifier = local.primary_instance_name + cluster_identifier = aws_rds_cluster.db.id + instance_class = "db.serverless" + engine = aws_rds_cluster.db.engine + engine_version = aws_rds_cluster.db.engine_version + auto_minor_version_upgrade = true + monitoring_role_arn = aws_iam_role.rds_enhanced_monitoring.arn + monitoring_interval = 30 +} + +resource "aws_kms_key" "db" { + description = "Key for RDS cluster ${var.name}" + enable_key_rotation = true +} + +# Query Logging +# ------------- + +resource "aws_rds_cluster_parameter_group" "rds_query_logging" { + name = "${var.name}-${local.engine_major_version}" + family = "aurora-postgresql${local.engine_major_version}" + description = "Default cluster parameter group" + + parameter { + name = "log_statement" + # Logs data definition statements (e.g. 
DROP, ALTER, CREATE) + value = "ddl" + } + + parameter { + name = "log_min_duration_statement" + # Logs all statements that run 100ms or longer + value = "100" + } +} diff --git a/infra/modules/database/monitoring.tf b/infra/modules/database/monitoring.tf new file mode 100644 index 00000000..788af863 --- /dev/null +++ b/infra/modules/database/monitoring.tf @@ -0,0 +1,28 @@ +#----------------------------------# +# IAM role for enhanced monitoring # +#----------------------------------# + +resource "aws_iam_role" "rds_enhanced_monitoring" { + name_prefix = "${var.name}-db-monitor-" + assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json +} + +resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" { + role = aws_iam_role.rds_enhanced_monitoring.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole" +} + +data "aws_iam_policy_document" "rds_enhanced_monitoring" { + statement { + actions = [ + "sts:AssumeRole", + ] + + effect = "Allow" + + principals { + type = "Service" + identifiers = ["monitoring.rds.amazonaws.com"] + } + } +} diff --git a/infra/modules/database/networking.tf b/infra/modules/database/networking.tf new file mode 100644 index 00000000..afc0e46e --- /dev/null +++ b/infra/modules/database/networking.tf @@ -0,0 +1,54 @@ +# Network Configuration +# --------------------- + +resource "aws_security_group" "db" { + name_prefix = "${var.name}-db" + description = "Database layer security group" + vpc_id = var.vpc_id +} + +resource "aws_security_group" "role_manager" { + name_prefix = "${var.name}-role-manager" + description = "Database role manager security group" + vpc_id = var.vpc_id +} + +resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_db" { + security_group_id = aws_security_group.role_manager.id + description = "Allow role manager to access database" + + from_port = 5432 + to_port = 5432 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.db.id +} + +resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_role_manager" { + security_group_id = aws_security_group.db.id + description = "Allow inbound requests to database from role manager" + + from_port = 5432 + to_port = 5432 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.role_manager.id +} + +resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_vpc_endpoints" { + security_group_id = aws_security_group.role_manager.id + description = "Allow outbound requests from role manager to VPC endpoints" + + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + referenced_security_group_id = var.aws_services_security_group_id +} + +resource "aws_vpc_security_group_ingress_rule" "vpc_endpoints_ingress_from_role_manager" { + security_group_id = var.aws_services_security_group_id + description = "Allow inbound requests to VPC endpoints from role manager" + + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.role_manager.id +} diff --git a/infra/modules/database/outputs.tf b/infra/modules/database/outputs.tf new file mode 100644 index 00000000..1e20d746 --- /dev/null +++ b/infra/modules/database/outputs.tf @@ -0,0 +1,3 @@ +output "role_manager_function_name" { + value = aws_lambda_function.role_manager.function_name +} diff --git a/infra/modules/database/role-manager.tf b/infra/modules/database/role-manager.tf new file mode 100644 index 00000000..e26a4d59 --- /dev/null +++ b/infra/modules/database/role-manager.tf @@ -0,0 
+1,155 @@ +# Role Manager Lambda Function +# ---------------------------- +# +# Resources for the lambda function that is used for managing database roles +# This includes creating and granting permissions to roles +# as well as viewing existing roles + +locals { + db_password_param_name = "/aws/reference/secretsmanager/${data.aws_secretsmanager_secret.db_password.name}" +} + +resource "aws_lambda_function" "role_manager" { + function_name = local.role_manager_name + + filename = local.role_manager_package + source_code_hash = data.archive_file.role_manager.output_base64sha256 + runtime = "python3.9" + handler = "role_manager.lambda_handler" + role = aws_iam_role.role_manager.arn + kms_key_arn = aws_kms_key.role_manager.arn + + # Only allow 1 concurrent execution at a time + reserved_concurrent_executions = 1 + + vpc_config { + subnet_ids = var.private_subnet_ids + security_group_ids = [aws_security_group.role_manager.id] + } + + environment { + variables = { + DB_HOST = aws_rds_cluster.db.endpoint + DB_PORT = aws_rds_cluster.db.port + DB_USER = local.master_username + DB_NAME = aws_rds_cluster.db.database_name + DB_PASSWORD_PARAM_NAME = local.db_password_param_name + DB_PASSWORD_SECRET_ARN = aws_rds_cluster.db.master_user_secret[0].secret_arn + DB_SCHEMA = var.schema_name + APP_USER = var.app_username + MIGRATOR_USER = var.migrator_username + PYTHONPATH = "vendor" + } + } + + # Ensure AWS Lambda functions with tracing are enabled + # https://docs.bridgecrew.io/docs/bc_aws_serverless_4 + tracing_config { + mode = "Active" + } + timeout = 30 + # checkov:skip=CKV_AWS_272:TODO(https://github.com/navapbc/template-infra/issues/283) + + # checkov:skip=CKV_AWS_116:Dead letter queue (DLQ) configuration is only relevant for asynchronous invocations +} + +# Installs python packages needed by the role manager lambda function before +# creating the zip archive. +# Runs pip install on every apply so that the role manager archive file that +# is generated locally is guaranteed to have the required dependencies even +# when terraform is run by a developer that did not originally create the +# environment. +# Timestamp is used to always trigger replacement. +resource "terraform_data" "role_manager_python_vendor_packages" { + triggers_replace = fileexists(local.role_manager_package) ? 
file("${path.module}/role_manager/requirements.txt") : timestamp() + provisioner "local-exec" { + command = "pip3 install -r ${path.module}/role_manager/requirements.txt -t ${path.module}/role_manager/vendor --upgrade" + } +} + +data "archive_file" "role_manager" { + type = "zip" + source_dir = "${path.module}/role_manager" + output_path = local.role_manager_package + depends_on = [terraform_data.role_manager_python_vendor_packages] +} + +data "aws_kms_key" "default_ssm_key" { + key_id = "alias/aws/ssm" +} + +# KMS key used to encrypt role manager's environment variables +resource "aws_kms_key" "role_manager" { + description = "Key for Lambda function ${local.role_manager_name}" + enable_key_rotation = true +} + +data "aws_secretsmanager_secret" "db_password" { + # master_user_secret is available when aws_rds_cluster.db.manage_master_user_password + # is true (see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/rds_cluster#master_user_secret) + arn = aws_rds_cluster.db.master_user_secret[0].secret_arn +} + +# IAM for Role Manager lambda function +resource "aws_iam_role" "role_manager" { + name = "${var.name}-manager" + assume_role_policy = data.aws_iam_policy_document.role_manager_assume_role.json + managed_policy_arns = [ + data.aws_iam_policy.lambda_vpc_access.arn, + + # Grant the role manager access to the DB as app and migrator users + # so that it can performance database checks. This is needed by + # the infra database tests + aws_iam_policy.app_db_access.arn, + aws_iam_policy.migrator_db_access.arn + ] +} + + + +resource "aws_iam_role_policy" "role_manager_access_to_db_password" { + name = "${var.name}-role-manager-ssm-access" + role = aws_iam_role.role_manager.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = ["kms:Decrypt"] + Resource = [data.aws_kms_key.default_ssm_key.arn] + }, + { + Effect = "Allow" + Action = ["secretsmanager:GetSecretValue"] + Resource = [data.aws_secretsmanager_secret.db_password.arn] + }, + { + Effect = "Allow" + Action = ["ssm:GetParameter"] + Resource = [ + "arn:aws:ssm:${data.aws_region.current.name}:${data.aws_caller_identity.current.id}:parameter${local.db_password_param_name}" + ] + } + ] + }) +} + +data "aws_iam_policy_document" "role_manager_assume_role" { + statement { + effect = "Allow" + + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + + actions = ["sts:AssumeRole"] + } +} + +# AWS managed policy required by Lambda functions in order to access VPC resources +# see https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html +data "aws_iam_policy" "lambda_vpc_access" { + name = "AWSLambdaVPCAccessExecutionRole" +} diff --git a/infra/modules/database/role_manager/check.py b/infra/modules/database/role_manager/check.py new file mode 100644 index 00000000..32ac3856 --- /dev/null +++ b/infra/modules/database/role_manager/check.py @@ -0,0 +1,63 @@ +import os + +from pg8000.native import Connection, literal + +import db + + +def check(config: dict): + """Check that database roles, schema, and privileges were + properly configured + """ + print("Running command 'check' to check database roles, schema, and privileges") + app_username = os.environ.get("APP_USER") + migrator_username = os.environ.get("MIGRATOR_USER") + schema_name = os.environ.get("DB_SCHEMA") + + with ( + db.connect_using_iam(app_username) as app_conn, + db.connect_using_iam(migrator_username) as migrator_conn, + ): + check_search_path(migrator_conn, schema_name) + 
check_migrator_create_table(migrator_conn) + check_app_use_table(app_conn) + check_superuser_extensions(app_conn, config["superuser_extensions"]) + cleanup_migrator_drop_table(migrator_conn) + + return {"success": True} + + +def check_search_path(migrator_conn: Connection, schema_name: str): + print(f"-- Check that search path is {schema_name}") + assert db.execute(migrator_conn, "SHOW search_path") == [[schema_name]] + + +def check_migrator_create_table(migrator_conn: Connection): + print("-- Check that migrator is able to create tables") + cleanup_migrator_drop_table(migrator_conn) + db.execute( + migrator_conn, + "CREATE TABLE IF NOT EXISTS role_manager_test(created_at TIMESTAMP)", + ) + + +def check_app_use_table(app_conn: Connection): + app_username = app_conn.user.decode("utf-8") + print(f"-- Check that {app_username} is able to read and write from the table") + db.execute(app_conn, "INSERT INTO role_manager_test (created_at) VALUES (NOW())") + db.execute(app_conn, "SELECT * FROM role_manager_test") + + +def check_superuser_extensions(app_conn: Connection, superuser_extensions: dict): + def to_str(enabled): + return "enabled" if enabled else "disabled" + + for extension, should_be_enabled in superuser_extensions.items(): + print(f"-- Check that {extension} extension is {to_str(should_be_enabled)}") + result = db.execute(app_conn, f"SELECT * FROM pg_extension WHERE extname={literal(extension)}") + is_enabled = len(result) > 0 + assert should_be_enabled == is_enabled + +def cleanup_migrator_drop_table(migrator_conn: Connection): + print("-- Clean up role_manager_test table if it exists") + db.execute(migrator_conn, "DROP TABLE IF EXISTS role_manager_test") diff --git a/infra/modules/database/role_manager/db.py b/infra/modules/database/role_manager/db.py new file mode 100644 index 00000000..ea751634 --- /dev/null +++ b/infra/modules/database/role_manager/db.py @@ -0,0 +1,56 @@ +import json +import os + +import boto3 +from pg8000.native import Connection, identifier + + +def connect_as_master_user() -> Connection: + user = os.environ["DB_USER"] + host = os.environ["DB_HOST"] + port = os.environ["DB_PORT"] + database = os.environ["DB_NAME"] + password = get_master_password() + + print(f"Connecting to database: {user=} {host=} {port=} {database=}") + return Connection( + user=user, + host=host, + port=port, + database=database, + password=password, + ssl_context=True, + ) + + +def get_master_password() -> str: + ssm = boto3.client("ssm", region_name=os.environ["AWS_REGION"]) + param_name = os.environ["DB_PASSWORD_PARAM_NAME"] + print(f"Fetching password from parameter store:\n{param_name}") + result = json.loads( + ssm.get_parameter(Name=param_name, WithDecryption=True)["Parameter"]["Value"] + ) + return result["password"] + + +def connect_using_iam(user: str) -> Connection: + client = boto3.client("rds") + host = os.environ["DB_HOST"] + port = os.environ["DB_PORT"] + database = os.environ["DB_NAME"] + token = client.generate_db_auth_token(DBHostname=host, Port=port, DBUsername=user) + print(f"Connecting to database: {user=} {host=} {port=} {database=}") + return Connection( + user=user, + host=host, + port=port, + database=database, + password=token, + ssl_context=True, + ) + + +def execute(conn: Connection, query: str, print_query: bool = True): + if print_query: + print(f"{conn.user.decode('utf-8')}> {query}") + return conn.run(query) diff --git a/infra/modules/database/role_manager/manage.py b/infra/modules/database/role_manager/manage.py new file mode 100644 index 00000000..c2fd2697 
--- /dev/null +++ b/infra/modules/database/role_manager/manage.py @@ -0,0 +1,216 @@ +import itertools +import os +from operator import itemgetter + +from pg8000.native import Connection, identifier + +import db + + +def manage(config: dict): + """Manage database roles, schema, and privileges""" + + print( + "-- Running command 'manage' to manage database roles, schema, and privileges" + ) + with db.connect_as_master_user() as master_conn: + print_current_db_config(master_conn) + configure_database(master_conn, config) + roles, schema_privileges = print_current_db_config(master_conn) + roles_with_groups = get_roles_with_groups(master_conn) + + configure_default_privileges() + + return { + "roles": roles, + "roles_with_groups": roles_with_groups, + "schema_privileges": { + schema_name: schema_acl for schema_name, schema_acl in schema_privileges + }, + } + + +def get_roles(conn: Connection) -> list[str]: + return [ + row[0] + for row in db.execute( + conn, + "SELECT rolname " + "FROM pg_roles " + "WHERE rolname NOT LIKE 'pg_%' " + "AND rolname NOT LIKE 'rds%'", + print_query=False, + ) + ] + + +def get_roles_with_groups(conn: Connection) -> dict[str, str]: + roles_groups = db.execute( + conn, + """ + SELECT u.rolname AS user, g.rolname AS group + FROM pg_roles u + INNER JOIN pg_auth_members a ON u.oid = a.member + INNER JOIN pg_roles g ON g.oid = a.roleid + ORDER BY user ASC + """, + print_query=False, + ) + + result = {} + for user, groups in itertools.groupby(roles_groups, itemgetter(0)): + result[user] = ",".join(map(itemgetter(1), groups)) + return result + + +# Get schema access control lists. The format of the ACLs is abbreviated. To interpret +# what the ACLs mean, see the Postgres documentation on Privileges: +# https://www.postgresql.org/docs/current/ddl-priv.html +def get_schema_privileges(conn: Connection) -> list[tuple[str, str]]: + return [ + (row[0], row[1]) + for row in db.execute( + conn, + """ + SELECT nspname, nspacl + FROM pg_namespace + WHERE nspname NOT LIKE 'pg_%' + AND nspname <> 'information_schema' + """, + print_query=False, + ) + ] + + +def configure_database(conn: Connection, config: dict) -> None: + print("-- Configuring database") + app_username = os.environ.get("APP_USER") + migrator_username = os.environ.get("MIGRATOR_USER") + schema_name = os.environ.get("DB_SCHEMA") + database_name = os.environ.get("DB_NAME") + + # In Postgres 15 and higher, the CREATE privilege on the public + # schema is already revoked/removed from all users except the + # database owner. However, we are explicitly revoking access anyways + # for projects that wish to use earlier versions of Postgres. 
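+    # Without this revoke, any role that can connect to the database would be
+    # able to create objects in the public schema on Postgres 14 and earlier.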
+ print("---- Revoking default access on public schema") + db.execute(conn, "REVOKE CREATE ON SCHEMA public FROM PUBLIC") + + print("---- Revoking database access from public role") + db.execute(conn, f"REVOKE ALL ON DATABASE {identifier(database_name)} FROM PUBLIC") + print(f"---- Setting default search path to schema {schema_name}") + db.execute( + conn, + f"ALTER DATABASE {identifier(database_name)} SET search_path TO {identifier(schema_name)}", + ) + + configure_roles(conn, [migrator_username, app_username], database_name) + configure_schema(conn, schema_name, migrator_username, app_username) + configure_superuser_extensions(conn, config["superuser_extensions"]) + +def configure_roles(conn: Connection, roles: list[str], database_name: str) -> None: + print("---- Configuring roles") + for role in roles: + configure_role(conn, role, database_name) + + +def configure_role(conn: Connection, username: str, database_name: str) -> None: + print(f"------ Configuring role: {username=}") + role = "rds_iam" + db.execute( + conn, + f""" + DO $$ + BEGIN + CREATE USER {identifier(username)}; + EXCEPTION WHEN DUPLICATE_OBJECT THEN + RAISE NOTICE 'user already exists'; + END + $$; + """, + ) + db.execute(conn, f"GRANT {identifier(role)} TO {identifier(username)}") + db.execute( + conn, + f"GRANT CONNECT ON DATABASE {identifier(database_name)} TO {identifier(username)}", + ) + + +def configure_schema( + conn: Connection, schema_name: str, migrator_username: str, app_username: str +) -> None: + print("---- Configuring schema") + print(f"------ Creating schema: {schema_name=}") + db.execute(conn, f"CREATE SCHEMA IF NOT EXISTS {identifier(schema_name)}") + print(f"------ Changing schema owner: new_owner={migrator_username}") + db.execute( + conn, + f"ALTER SCHEMA {identifier(schema_name)} OWNER TO {identifier(migrator_username)}", + ) + print(f"------ Granting schema usage privileges: grantee={app_username}") + db.execute( + conn, + f"GRANT USAGE ON SCHEMA {identifier(schema_name)} TO {identifier(app_username)}", + ) + + +def configure_default_privileges(): + """ + Configure default privileges so that future tables, sequences, and routines + created by the migrator user can be accessed by the app user. + You can only alter default privileges for the current role, so we need to + run these SQL queries as the migrator user rather than as the master user. 
+ """ + migrator_username = os.environ.get("MIGRATOR_USER") + schema_name = os.environ.get("DB_SCHEMA") + app_username = os.environ.get("APP_USER") + with db.connect_using_iam(migrator_username) as conn: + print( + f"------ Granting privileges for future objects in schema: grantee={app_username}" + ) + db.execute( + conn, + f"ALTER DEFAULT PRIVILEGES IN SCHEMA {identifier(schema_name)} GRANT ALL ON TABLES TO {identifier(app_username)}", + ) + db.execute( + conn, + f"ALTER DEFAULT PRIVILEGES IN SCHEMA {identifier(schema_name)} GRANT ALL ON SEQUENCES TO {identifier(app_username)}", + ) + db.execute( + conn, + f"ALTER DEFAULT PRIVILEGES IN SCHEMA {identifier(schema_name)} GRANT ALL ON ROUTINES TO {identifier(app_username)}", + ) + + +def print_current_db_config( + conn: Connection, +) -> tuple[list[str], list[tuple[str, str]]]: + print("-- Current database configuration") + roles = get_roles(conn) + print_roles(roles) + schema_privileges = get_schema_privileges(conn) + print_schema_privileges(schema_privileges) + return roles, schema_privileges + + +def print_roles(roles: list[str]) -> None: + print("---- Roles") + for role in roles: + print(f"------ Role {role}") + + +def print_schema_privileges(schema_privileges: list[tuple[str, str]]) -> None: + print("---- Schema privileges") + for name, acl in schema_privileges: + print(f"------ Schema {name=} {acl=}") + + +def configure_superuser_extensions(conn: Connection, superuser_extensions: dict): + print("---- Configuring superuser extensions") + for extension, should_be_enabled in superuser_extensions.items(): + if should_be_enabled: + print(f"------ Enabling {extension} extension") + db.execute(conn, f"CREATE EXTENSION IF NOT EXISTS {identifier(extension)} SCHEMA pg_catalog") + else: + print(f"------ Disabling or skipping {extension} extension") + db.execute(conn, f"DROP EXTENSION IF EXISTS {identifier(extension)}") diff --git a/infra/modules/database/role_manager/requirements.txt b/infra/modules/database/role_manager/requirements.txt new file mode 100644 index 00000000..a63f2374 --- /dev/null +++ b/infra/modules/database/role_manager/requirements.txt @@ -0,0 +1 @@ +pg8000 \ No newline at end of file diff --git a/infra/modules/database/role_manager/role_manager.py b/infra/modules/database/role_manager/role_manager.py new file mode 100644 index 00000000..20898ef7 --- /dev/null +++ b/infra/modules/database/role_manager/role_manager.py @@ -0,0 +1,11 @@ +from check import check +from manage import manage + + +def lambda_handler(event, context): + action = event["action"] + assert action in ("check", "manage") + if action == "check": + return check(event["config"]) + elif action == "manage": + return manage(event["config"]) diff --git a/infra/modules/database/variables.tf b/infra/modules/database/variables.tf new file mode 100644 index 00000000..8033fa7e --- /dev/null +++ b/infra/modules/database/variables.tf @@ -0,0 +1,67 @@ +variable "name" { + description = "name of the database cluster. Note that this is not the name of the Postgres database itself, but the name of the cluster in RDS. The name of the Postgres database is set in module and defaults to 'app'." + type = string + validation { + condition = can(regex("^[-_\\da-z]+$", var.name)) + error_message = "use only lower case letters, numbers, dashes, and underscores" + } +} + +variable "app_access_policy_name" { + description = "name of the IAM policy to create that will provide the service the ability to connect to the database as a user that will have read/write access." 
+ type = string +} + +variable "migrator_access_policy_name" { + description = "name of the IAM policy to create that will provide the migration task the ability to connect to the database as a user that will have read/write access." + type = string +} + +variable "app_username" { + description = "name of the database user to create that will be for the application." + type = string +} + +variable "migrator_username" { + description = "name of the database user to create that will be for the role that will run database migrations." + type = string +} + +variable "schema_name" { + description = "name of the Postgres schema to create that will be the schema the application will use (rather than using the public schema)" + type = string +} + +variable "port" { + description = "value of the port on which the database accepts connections. Defaults to 5432." + default = 5432 +} + +variable "database_name" { + description = "the name of the Postgres database. Defaults to 'app'." + default = "app" + validation { + condition = can(regex("^[_\\da-z]+$", var.database_name)) + error_message = "use only lower case letters, numbers, and underscores (no dashes)" + } +} + +variable "vpc_id" { + type = string + description = "Uniquely identifies the VPC." +} + +variable "database_subnet_group_name" { + type = string + description = "Name of database subnet group" +} + +variable "private_subnet_ids" { + type = list(any) + description = "list of private subnet IDs to put the role provisioner and role checker lambda functions in" +} + +variable "aws_services_security_group_id" { + type = string + description = "Security group ID for VPC endpoints that access AWS Services" +} diff --git a/infra/modules/domain/certificates.tf b/infra/modules/domain/certificates.tf new file mode 100644 index 00000000..d935414a --- /dev/null +++ b/infra/modules/domain/certificates.tf @@ -0,0 +1,59 @@ +locals { + # Filter configs for issued certificates. + # These certificates are managed by the project using AWS Certificate Manager. + issued_certificate_configs = { + for domain, config in var.certificate_configs : domain => config + if config.source == "issued" + } + + # Filter configs for imported certificates. + # These certificates are created outside of the project and imported. + imported_certificate_configs = { + for domain, config in var.certificate_configs : domain => config + if config.source == "imported" + } + + domain_validation_options = merge([ + for domain, config in local.issued_certificate_configs : + { + for dvo in aws_acm_certificate.issued[domain].domain_validation_options : + dvo.domain_name => { + name = dvo.resource_record_name + record = dvo.resource_record_value + type = dvo.resource_record_type + } + } + ]...) +} + +# ACM certificate that will be used by the load balancer. +resource "aws_acm_certificate" "issued" { + for_each = local.issued_certificate_configs + + domain_name = each.key + validation_method = "DNS" + + lifecycle { + create_before_destroy = true + } +} + +# DNS records for certificate validation. +resource "aws_route53_record" "validation" { + for_each = local.domain_validation_options + + allow_overwrite = true + zone_id = aws_route53_zone.zone[0].zone_id + name = each.value.name + type = each.value.type + ttl = 60 + records = [each.value.record] +} + +# Representation of successful validation of the ACM certificate. 
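+# Terraform waits on this resource until the validation records above have
+# propagated and ACM reports the certificate as issued, so resources that
+# depend on it (e.g. HTTPS listeners) only reference a usable certificate.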
+resource "aws_acm_certificate_validation" "validation" { + for_each = local.imported_certificate_configs + + certificate_arn = aws_acm_certificate.issued[each.key].arn + validation_record_fqdns = [for record in aws_route53_record.validation : record.fqdn] +} diff --git a/infra/modules/domain/main.tf b/infra/modules/domain/main.tf new file mode 100644 index 00000000..931163f3 --- /dev/null +++ b/infra/modules/domain/main.tf @@ -0,0 +1,10 @@ +# Create a Route53 hosted zone for the domain. +# Individual address records will be created in the service layer by the services that +# need them (e.g. the load balancer or CDN). +# If DNS is managed elsewhere then this resource will not be created. +resource "aws_route53_zone" "zone" { + count = var.manage_dns ? 1 : 0 + name = var.name + + # checkov:skip=CKV2_AWS_38:TODO(https://github.com/navapbc/template-infra/issues/560) enable DNSSEC +} diff --git a/infra/modules/domain/outputs.tf b/infra/modules/domain/outputs.tf new file mode 100644 index 00000000..bf1615ec --- /dev/null +++ b/infra/modules/domain/outputs.tf @@ -0,0 +1,9 @@ +output "hosted_zone_name_servers" { + value = length(aws_route53_zone.zone) > 0 ? aws_route53_zone.zone[0].name_servers : [] +} + +output "certificate_arns" { + value = { + for domain in keys(var.certificate_configs) : domain => aws_acm_certificate.issued[domain].arn + } +} diff --git a/infra/modules/domain/query-logs.tf b/infra/modules/domain/query-logs.tf new file mode 100644 index 00000000..bc2e5c78 --- /dev/null +++ b/infra/modules/domain/query-logs.tf @@ -0,0 +1,43 @@ +# DNS query logging + +resource "aws_cloudwatch_log_group" "dns_query_logging" { + count = var.manage_dns ? 1 : 0 + + name = "/dns/${var.name}" + retention_in_days = 30 + + # checkov:skip=CKV_AWS_158:No need to manage KMS key for DNS query logs or audit access to these logs +} + +resource "aws_route53_query_log" "dns_query_logging" { + count = var.manage_dns ? 1 : 0 + + zone_id = aws_route53_zone.zone[0].zone_id + cloudwatch_log_group_arn = aws_cloudwatch_log_group.dns_query_logging[0].arn + + depends_on = [aws_cloudwatch_log_resource_policy.dns_query_logging[0]] +} + +# Allow Route53 to write logs to any log group under /dns/* +data "aws_iam_policy_document" "dns_query_logging" { + statement { + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ] + + resources = ["arn:aws:logs:*:*:log-group:/dns/*"] + + principals { + identifiers = ["route53.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_cloudwatch_log_resource_policy" "dns_query_logging" { + count = var.manage_dns ? 
1 : 0 + + policy_document = data.aws_iam_policy_document.dns_query_logging.json + policy_name = "dns-query-logging" +} diff --git a/infra/modules/domain/variables.tf b/infra/modules/domain/variables.tf new file mode 100644 index 00000000..5532c852 --- /dev/null +++ b/infra/modules/domain/variables.tf @@ -0,0 +1,51 @@ +variable "name" { + type = string + description = "Fully qualified domain name" +} + +variable "manage_dns" { + type = bool + description = "Whether DNS is managed by the project (true) or managed externally (false)" +} + +variable "certificate_configs" { + type = map(object({ + source = string + private_key = optional(string) + certificate_body = optional(string) + })) + description = < [ + for subnet in data.aws_subnet.private[*] : + subnet.id + if contains(data.aws_vpc_endpoint_service.aws_service[service].availability_zones, subnet.availability_zone) + ] + } +} + +resource "aws_security_group" "aws_services" { + name_prefix = var.aws_services_security_group_name_prefix + description = "VPC endpoints to access AWS services from the VPCs private subnets" + vpc_id = module.aws_vpc.vpc_id +} + +resource "aws_vpc_endpoint" "interface" { + for_each = local.interface_vpc_endpoints + + vpc_id = module.aws_vpc.vpc_id + service_name = "com.amazonaws.${data.aws_region.current.name}.${each.key}" + vpc_endpoint_type = "Interface" + security_group_ids = [aws_security_group.aws_services.id] + subnet_ids = local.aws_service_subnets[each.key] + private_dns_enabled = true +} + +resource "aws_vpc_endpoint" "gateway" { + for_each = local.gateway_vpc_endpoints + + vpc_id = module.aws_vpc.vpc_id + service_name = "com.amazonaws.${data.aws_region.current.name}.${each.key}" + vpc_endpoint_type = "Gateway" + route_table_ids = module.aws_vpc.private_route_table_ids +} diff --git a/infra/modules/secret/main.tf b/infra/modules/secret/main.tf new file mode 100644 index 00000000..8619c86e --- /dev/null +++ b/infra/modules/secret/main.tf @@ -0,0 +1,26 @@ +locals { + secret = var.manage_method == "generated" ? aws_ssm_parameter.secret[0] : data.aws_ssm_parameter.secret[0] + access_policy_name = "${trimprefix(replace(local.secret.name, "/", "-"), "/")}-access" +} + +resource "random_password" "secret" { + count = var.manage_method == "generated" ? 1 : 0 + + length = 64 + special = true + override_special = "!#$%&*()-_=+[]{}<>:?" +} + +resource "aws_ssm_parameter" "secret" { + count = var.manage_method == "generated" ? 1 : 0 + + name = var.secret_store_name + type = "SecureString" + value = random_password.secret[0].result +} + +data "aws_ssm_parameter" "secret" { + count = var.manage_method == "manual" ? 1 : 0 + + name = var.secret_store_name +} diff --git a/infra/modules/secret/outputs.tf b/infra/modules/secret/outputs.tf new file mode 100644 index 00000000..57ebfcf8 --- /dev/null +++ b/infra/modules/secret/outputs.tf @@ -0,0 +1,3 @@ +output "secret_arn" { + value = local.secret.arn +} diff --git a/infra/modules/secret/variables.tf b/infra/modules/secret/variables.tf new file mode 100644 index 00000000..53d00e06 --- /dev/null +++ b/infra/modules/secret/variables.tf @@ -0,0 +1,22 @@ +variable "manage_method" { + type = string + description = < 0 ? 
[1] : []
+    content {
+      sid       = "SecretsAccess"
+      actions   = ["ssm:GetParameters"]
+      resources = [for secret in var.secrets : secret.valueFrom]
+    }
+  }
+}
+
+resource "aws_iam_role_policy" "task_executor" {
+  name   = "${var.service_name}-task-executor-role-policy"
+  role   = aws_iam_role.task_executor.id
+  policy = data.aws_iam_policy_document.task_executor.json
+}
+
+resource "aws_iam_role_policy_attachment" "extra_policies" {
+  for_each = var.extra_policies
+
+  role       = aws_iam_role.app_service.name
+  policy_arn = each.value
+}
diff --git a/infra/modules/service/access-logs.tf b/infra/modules/service/access-logs.tf
new file mode 100644
index 00000000..3fd618f8
--- /dev/null
+++ b/infra/modules/service/access-logs.tf
@@ -0,0 +1,107 @@
+# This file defines resources for load balancer access logs
+# including the S3 bucket where access logs are stored and
+# the IAM policy granting the AWS Elastic Load Balancer service
+# permission to write to the bucket
+locals {
+  # This is needed to grant permissions to the ELB service for sending access logs to S3.
+  # The list was obtained from https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html
+  elb_account_map = {
+    "us-east-1" : "127311923021",
+    "us-east-2" : "033677994240",
+    "us-west-1" : "027434742980",
+    "us-west-2" : "797873946194"
+  }
+
+  # set log_file_transition = {} to disable lifecycle transitions. Additional lifecycle transitions can be added via a key value pair of `$STORAGE_CLASS=$DAYS`
+  log_file_transition = {
+    STANDARD_IA = 30
+    GLACIER     = 60
+  }
+}
+
+resource "aws_s3_bucket" "access_logs" {
+  bucket_prefix = "${var.service_name}-access-logs"
+
+  # Use a separate line to support automated terraform destroy commands
+  force_destroy = false
+
+  # checkov:skip=CKV2_AWS_62:Event notification not necessary for this bucket especially due to likely use of lifecycle rules
+  # checkov:skip=CKV_AWS_18:Access logging was not considered necessary for this bucket
+  # checkov:skip=CKV_AWS_144:Not considered critical to the point of cross region replication
+  # checkov:skip=CKV_AWS_300:Known issue where Checkov gets confused by multiple rules
+  # checkov:skip=CKV_AWS_21:Bucket versioning is not worth it in this use case
+}
+
+resource "aws_s3_bucket_public_access_block" "access_logs" {
+  bucket = aws_s3_bucket.access_logs.id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+data "aws_iam_policy_document" "access_logs_put_access" {
+  statement {
+    effect = "Allow"
+    resources = [
+      aws_s3_bucket.access_logs.arn,
+      "${aws_s3_bucket.access_logs.arn}/*"
+    ]
+    actions = ["s3:PutObject"]
+
+    principals {
+      type        = "AWS"
+      identifiers = ["arn:aws:iam::${local.elb_account_map[data.aws_region.current.name]}:root"]
+    }
+  }
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "access_logs" {
+  bucket = aws_s3_bucket.access_logs.id
+
+  rule {
+    id     = "AbortIncompleteUpload"
+    status = "Enabled"
+    abort_incomplete_multipart_upload {
+      days_after_initiation = 7
+    }
+  }
+
+  rule {
+    id     = "StorageClass"
+    status = "Enabled"
+    dynamic "transition" {
+      for_each = local.log_file_transition
+      content {
+        days          = transition.value
+        storage_class = transition.key
+      }
+    }
+  }
+
+  rule {
+    id     = "Expiration"
+    status = "Enabled"
+    expiration {
+      days = 2555
+    }
+  }
+  # checkov:skip=CKV_AWS_300:There is a known issue where this check brings up false positives
+}
+
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "encryption" {
+  bucket = 
aws_s3_bucket.access_logs.id
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm = "aws:kms"
+    }
+    bucket_key_enabled = true
+  }
+}
+
+resource "aws_s3_bucket_policy" "access_logs" {
+  bucket = aws_s3_bucket.access_logs.id
+  policy = data.aws_iam_policy_document.access_logs_put_access.json
+}
diff --git a/infra/modules/service/application-logs.tf b/infra/modules/service/application-logs.tf
new file mode 100644
index 00000000..b0a8de47
--- /dev/null
+++ b/infra/modules/service/application-logs.tf
@@ -0,0 +1,15 @@
+#------
+# Logs
+#------
+
+# CloudWatch log group for streaming ECS application logs.
+resource "aws_cloudwatch_log_group" "service_logs" {
+  name = local.log_group_name
+
+  # Conservatively retain logs for 5 years.
+  # Looser requirements may allow shorter retention periods
+  retention_in_days = 1827
+
+  # TODO(https://github.com/navapbc/template-infra/issues/164) Encrypt with customer managed KMS key
+  # checkov:skip=CKV_AWS_158:Encrypt service logs with customer key in future work
+}
diff --git a/infra/modules/service/command-execution.tf b/infra/modules/service/command-execution.tf
new file mode 100644
index 00000000..8210a9c7
--- /dev/null
+++ b/infra/modules/service/command-execution.tf
@@ -0,0 +1,31 @@
+#-----------------
+# ECS Exec Access
+# See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html
+#-----------------
+resource "aws_iam_policy" "ecs_exec" {
+  name        = "${var.service_name}-ecs-exec"
+  description = "Allow access to SSM Messages to support ECS Exec"
+  policy      = data.aws_iam_policy_document.ecs_exec.json
+}
+
+data "aws_iam_policy_document" "ecs_exec" {
+  # Allow ECS to access SSM Messages so that ECS Exec works
+  # See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html
+  statement {
+    sid    = "SSMAccess"
+    effect = "Allow"
+    actions = [
+      "ssmmessages:CreateControlChannel",
+      "ssmmessages:CreateDataChannel",
+      "ssmmessages:OpenControlChannel",
+      "ssmmessages:OpenDataChannel",
+    ]
+    resources = ["*"]
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "ecs_exec" {
+  count      = var.enable_command_execution ? 1 : 0
+  role       = aws_iam_role.app_service.name
+  policy_arn = aws_iam_policy.ecs_exec.arn
+}
diff --git a/infra/modules/service/database-access.tf b/infra/modules/service/database-access.tf
new file mode 100644
index 00000000..77ffa9bf
--- /dev/null
+++ b/infra/modules/service/database-access.tf
@@ -0,0 +1,29 @@
+#-----------------
+# Database Access
+#-----------------
+
+resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_service" {
+  count = var.db_vars != null ? length(var.db_vars.security_group_ids) : 0
+
+  security_group_id = var.db_vars.security_group_ids[count.index]
+  description       = "Allow inbound requests to database from ${var.service_name} service"
+
+  from_port                    = tonumber(var.db_vars.connection_info.port)
+  to_port                      = tonumber(var.db_vars.connection_info.port)
+  ip_protocol                  = "tcp"
+  referenced_security_group_id = aws_security_group.app.id
+}
+
+resource "aws_iam_role_policy_attachment" "app_service_db_access" {
+  count = var.db_vars != null ? 1 : 0
+
+  role       = aws_iam_role.app_service.name
+  policy_arn = var.db_vars.app_access_policy_arn
+}
+
+resource "aws_iam_role_policy_attachment" "migrator_db_access" {
+  count = var.db_vars != null ? 
1 : 0
+
+  role       = aws_iam_role.migrator_task[0].name
+  policy_arn = var.db_vars.migrator_access_policy_arn
+}
diff --git a/infra/modules/service/dns.tf b/infra/modules/service/dns.tf
new file mode 100644
index 00000000..cc6781a1
--- /dev/null
+++ b/infra/modules/service/dns.tf
@@ -0,0 +1,13 @@
+resource "aws_route53_record" "app" {
+  # Don't create DNS record for temporary environments (e.g. ones spun up by CI/CD)
+  count = !var.is_temporary && var.domain_name != null && var.hosted_zone_id != null ? 1 : 0
+
+  name    = var.domain_name
+  zone_id = var.hosted_zone_id
+  type    = "A"
+  alias {
+    name                   = aws_lb.alb.dns_name
+    zone_id                = aws_lb.alb.zone_id
+    evaluate_target_health = true
+  }
+}
diff --git a/infra/modules/service/jobs.tf b/infra/modules/service/jobs.tf
new file mode 100644
index 00000000..0d0bef71
--- /dev/null
+++ b/infra/modules/service/jobs.tf
@@ -0,0 +1,106 @@
+#-----------------
+# Background Jobs
+#-----------------
+# CloudWatch Event Rules and CloudWatch Event Targets that define event-based
+# triggers for background jobs, such as jobs that trigger when a file is
+# uploaded to an S3 bucket or jobs that trigger on a specified "cron" schedule.
+#
+# For each job configuration, there is a single event rule and an associated
+# event target
+#
+
+# Event rules that trigger whenever an object is created in S3
+# for a particular source bucket and object key prefix
+resource "aws_cloudwatch_event_rule" "file_upload_jobs" {
+  for_each = var.file_upload_jobs
+
+  name        = "${local.cluster_name}-${each.key}"
+  description = "File uploaded to bucket ${each.value.source_bucket} with path prefix ${each.value.path_prefix}"
+
+  event_pattern = jsonencode({
+    source      = ["aws.s3"],
+    detail-type = ["Object Created"],
+    detail = {
+      bucket = {
+        name = [each.value.source_bucket]
+      },
+      object = {
+        key = [{
+          prefix = each.value.path_prefix
+        }]
+      }
+    }
+  })
+}
+
+# Event target for each event rule that specifies what task command to run
+
+resource "aws_cloudwatch_event_target" "document_upload_jobs" {
+  for_each = var.file_upload_jobs
+
+  target_id = "${local.cluster_name}-${each.key}"
+  rule      = aws_cloudwatch_event_rule.file_upload_jobs[each.key].name
+  arn       = aws_ecs_cluster.cluster.arn
+  role_arn  = aws_iam_role.events.arn
+
+  ecs_target {
+    task_definition_arn = aws_ecs_task_definition.app.arn
+    launch_type         = "FARGATE"
+
+    # Configuring Network Configuration is required when the task definition uses the awsvpc network mode.
+    network_configuration {
+      subnets         = var.private_subnet_ids
+      security_groups = [aws_security_group.app.id]
+    }
+  }
+
+  input_transformer {
+    input_paths = {
+      bucket_name = "$.detail.bucket.name",
+      object_key  = "$.detail.object.key",
+    }
+
+    # When triggering the ECS task, override the command to run in the container to the
+    # command specified by the file_upload_job config. To do this define an input_template
+    # that transforms the input S3 event:
+    #   {
+    #     detail: {
+    #       bucket: { name: "mybucket" },
+    #       object: { key: "uploaded/file/path" }
+    #     }
+    #   }
+    # to match the Amazon ECS RunTask TaskOverride structure:
+    #   {
+    #     containerOverrides: [{
+    #       name: "container_name",
+    #       command: ["command", "to", "run"]
+    #     }]
+    #   }
+    # (see https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-targets.html#targets-specifics-ecs-task
+    # and https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TaskOverride.html)
+    #
+    # The task command can optionally use the bucket name or the object key in the command
+    # by including the placeholder values "<bucket_name>" or "<object_key>", e.g.
+    #   {
+    #     containerOverrides: [{
+    #       name: "container_name",
+    #       command: ["process_file.sh", "--bucket", "<bucket_name>", "--object", "<object_key>"]
+    #     }]
+    #   }
+    #
+    # Since jsonencode will cause the string "<bucket_name>" to turn into
+    # "\u003cbucket_name\u003e" and "<object_key>" to turn into "\u003cobject_key\u003e",
+    # we need to replace the unicode characters U+003C and U+003E with < and > to reverse
+    # the encoding.
+    # (see https://developer.hashicorp.com/terraform/language/functions/jsonencode and
+    # https://github.com/hashicorp/terraform/pull/18871)
+    input_template = replace(replace(jsonencode({
+      containerOverrides = [
+        {
+          name    = local.container_name,
+          command = each.value.task_command
+        }
+      ]
+    }), "\\u003c", "<"), "\\u003e", ">")
+  }
+}
diff --git a/infra/modules/service/load-balancer.tf b/infra/modules/service/load-balancer.tf
new file mode 100644
index 00000000..b61c0954
--- /dev/null
+++ b/infra/modules/service/load-balancer.tf
@@ -0,0 +1,135 @@
+#---------------
+# Load balancer
+#---------------
+
+# ALB for an app running in ECS
+resource "aws_lb" "alb" {
+  depends_on      = [aws_s3_bucket_policy.access_logs]
+  name            = var.service_name
+  idle_timeout    = "120"
+  internal        = false
+  security_groups = [aws_security_group.alb.id]
+  subnets         = var.public_subnet_ids
+
+  # Use a separate line to support automated terraform destroy commands
+  # checkov:skip=CKV_AWS_150:Allow deletion for automated tests
+  enable_deletion_protection = !var.is_temporary
+
+  # TODO(https://github.com/navapbc/template-infra/issues/163) Implement HTTPS
+  # checkov:skip=CKV2_AWS_20:Redirect HTTP to HTTPS as part of implementing HTTPS support
+
+  # TODO(https://github.com/navapbc/template-infra/issues/165) Protect ALB with WAF
+  # checkov:skip=CKV2_AWS_28:Implement WAF in issue #165
+
+  # Drop invalid HTTP headers for improved security
+  # Note that header names cannot contain underscores
+  # https://docs.bridgecrew.io/docs/ensure-that-alb-drops-http-headers
+  drop_invalid_header_fields = true
+
+  access_logs {
+    bucket  = aws_s3_bucket.access_logs.id
+    prefix  = "${var.service_name}-lb"
+    enabled = true
+  }
+}
+
+# NOTE: for the demo we expose a private HTTP endpoint
+# due to the complexity of acquiring a valid TLS/SSL cert.
+# In a production system we would provision an HTTPS listener
+resource "aws_lb_listener" "alb_listener_http" {
+  # TODO(https://github.com/navapbc/template-infra/issues/163) Use HTTPS protocol
+  # checkov:skip=CKV_AWS_2:Implement HTTPS in issue #163
+  # checkov:skip=CKV_AWS_103:Require TLS 1.2 as part of implementing HTTPS support
+
+  load_balancer_arn = aws_lb.alb.arn
+  port              = "80"
+  protocol          = "HTTP"
+
+  default_action {
+    type = "fixed-response"
+
+    fixed_response {
+      content_type = "text/plain"
+      message_body = "Not Found"
+      status_code  = "404"
+    }
+  }
+}
+
+resource "aws_lb_listener_rule" "app_http_forward" {
+  listener_arn = aws_lb_listener.alb_listener_http.arn
+  priority     = 100
+
+  action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.app_tg.arn
+  }
+  condition {
+    path_pattern {
+      values = ["/*"]
+    }
+  }
+}
+
+resource "aws_lb_listener" "alb_listener_https" {
+  count = var.certificate_arn != null ? 
1 : 0 + + load_balancer_arn = aws_lb.alb.arn + port = 443 + protocol = "HTTPS" + certificate_arn = var.certificate_arn + + # Use security policy that supports TLS 1.3 but requires at least TLS 1.2 + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06" + + default_action { + type = "fixed-response" + + fixed_response { + content_type = "text/plain" + message_body = "Not Found" + status_code = "404" + } + } +} + +resource "aws_lb_listener_rule" "app_https_forward" { + count = var.certificate_arn != null ? 1 : 0 + + listener_arn = aws_lb_listener.alb_listener_https[0].arn + priority = 100 + + action { + type = "forward" + target_group_arn = aws_lb_target_group.app_tg.arn + } + condition { + path_pattern { + values = ["/*"] + } + } +} + +resource "aws_lb_target_group" "app_tg" { + # you must use a prefix, to facilitate successful tg changes + name_prefix = "app-" + port = var.container_port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = "ip" + deregistration_delay = "30" + + health_check { + path = "/health" + port = var.container_port + healthy_threshold = 2 + unhealthy_threshold = 10 + interval = 30 + timeout = 29 + matcher = "200-299" + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/infra/modules/service/main.tf b/infra/modules/service/main.tf new file mode 100644 index 00000000..2397baaf --- /dev/null +++ b/infra/modules/service/main.tf @@ -0,0 +1,143 @@ +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} +data "aws_ecr_repository" "app" { + name = var.image_repository_name +} + +locals { + alb_name = var.service_name + cluster_name = var.service_name + container_name = var.service_name + log_group_name = "service/${var.service_name}" + log_stream_prefix = var.service_name + task_executor_role_name = "${var.service_name}-task-executor" + image_url = "${data.aws_ecr_repository.app.repository_url}:${var.image_tag}" + + base_environment_variables = [ + { name : "PORT", value : tostring(var.container_port) }, + { name : "AWS_DEFAULT_REGION", value : data.aws_region.current.name }, + { name : "AWS_REGION", value : data.aws_region.current.name }, + { name : "IMAGE_TAG", value : var.image_tag }, + ] + db_environment_variables = var.db_vars == null ? [] : [ + { name : "DB_HOST", value : var.db_vars.connection_info.host }, + { name : "DB_PORT", value : var.db_vars.connection_info.port }, + { name : "DB_USER", value : var.db_vars.connection_info.user }, + { name : "DB_NAME", value : var.db_vars.connection_info.db_name }, + { name : "DB_SCHEMA", value : var.db_vars.connection_info.schema_name }, + ] + environment_variables = concat( + local.base_environment_variables, + local.db_environment_variables, + [ + for name, value in var.extra_environment_variables : + { name : name, value : value } + ], + ) +} + +#------------------- +# Service Execution +#------------------- + +resource "aws_ecs_service" "app" { + name = var.service_name + cluster = aws_ecs_cluster.cluster.arn + launch_type = "FARGATE" + task_definition = aws_ecs_task_definition.app.arn + desired_count = var.desired_instance_count + enable_execute_command = var.enable_command_execution ? true : null + + # Allow changes to the desired_count without differences in terraform plan. + # This allows autoscaling to manage the desired count for us. 
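+  # Without ignore_changes, every terraform apply would reset the service's
+  # desired count back to var.desired_instance_count, undoing any adjustments
+  # made by autoscaling.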
+ lifecycle { + ignore_changes = [desired_count] + } + + network_configuration { + assign_public_ip = false + subnets = var.private_subnet_ids + security_groups = [aws_security_group.app.id] + } + + load_balancer { + target_group_arn = aws_lb_target_group.app_tg.arn + container_name = var.service_name + container_port = var.container_port + } +} + +resource "aws_ecs_task_definition" "app" { + family = var.service_name + execution_role_arn = aws_iam_role.task_executor.arn + task_role_arn = aws_iam_role.app_service.arn + ephemeral_storage { size_in_gib = 40 } + + container_definitions = jsonencode([ + { + name = local.container_name, + image = local.image_url, + memory = var.memory, + cpu = var.cpu, + networkMode = "awsvpc", + essential = true, + readonlyRootFilesystem = false, + + # Need to define all parameters in the healthCheck block even if we want + # to use AWS's defaults, otherwise the terraform plan will show a diff + # that will force a replacement of the task definition + healthCheck = { + interval = 30, + retries = 3, + timeout = 5, + command = ["CMD-SHELL", + "wget --no-verbose --tries=1 --spider http://localhost:${var.container_port}/health || exit 1" + ] + }, + environment = local.environment_variables, + secrets = var.secrets, + portMappings = [ + { + containerPort = var.container_port, + hostPort = var.container_port, + protocol = "tcp" + } + ], + linuxParameters = { + capabilities = { + add = [] + drop = ["ALL"] + }, + initProcessEnabled = true + }, + logConfiguration = { + logDriver = "awslogs", + options = { + "awslogs-group" = aws_cloudwatch_log_group.service_logs.name, + "awslogs-region" = data.aws_region.current.name, + "awslogs-stream-prefix" = local.log_stream_prefix + } + } + mountPoints = [] + systemControls = [] + volumesFrom = [] + } + ]) + + cpu = var.cpu + memory = var.memory + + requires_compatibilities = ["FARGATE"] + + # Reference https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html + network_mode = "awsvpc" +} + +resource "aws_ecs_cluster" "cluster" { + name = local.cluster_name + + setting { + name = "containerInsights" + value = "enabled" + } +} diff --git a/infra/modules/service/networking.tf b/infra/modules/service/networking.tf new file mode 100644 index 00000000..5e391812 --- /dev/null +++ b/infra/modules/service/networking.tf @@ -0,0 +1,88 @@ +#----------------------- +# Network Configuration +#----------------------- + +resource "aws_security_group" "alb" { + # Specify name_prefix instead of name because when a change requires creating a new + # security group, sometimes the change requires the new security group to be created + # before the old one is destroyed. 
In this situation, the new one needs a unique name
+  name_prefix = "${var.service_name}-alb"
+  description = "Allow TCP traffic to application load balancer"
+
+  lifecycle {
+    create_before_destroy = true
+
+    # changing the description is a destructive change
+    # just ignore it
+    ignore_changes = [description]
+  }
+
+  vpc_id = var.vpc_id
+
+  # TODO(https://github.com/navapbc/template-infra/issues/163) Disallow incoming traffic to port 80
+  # checkov:skip=CKV_AWS_260:Disallow ingress from 0.0.0.0:0 to port 80 when implementing HTTPS support in issue #163
+  ingress {
+    description = "Allow HTTP traffic from public internet"
+    from_port   = 80
+    to_port     = 80
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    description = "Allow HTTPS traffic from public internet"
+    from_port   = 443
+    to_port     = 443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    description = "Allow all outgoing traffic"
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+# Security group to allow access to Fargate tasks
+resource "aws_security_group" "app" {
+  # Specify name_prefix instead of name because when a change requires creating a new
+  # security group, sometimes the change requires the new security group to be created
+  # before the old one is destroyed. In this situation, the new one needs a unique name
+  name_prefix = "${var.service_name}-app"
+  description = "Allow inbound TCP access to application container port"
+  vpc_id      = var.vpc_id
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_vpc_security_group_egress_rule" "service_egress_to_all" {
+  security_group_id = aws_security_group.app.id
+  description       = "Allow all outgoing traffic from application"
+
+  ip_protocol = "-1"
+  cidr_ipv4   = "0.0.0.0/0"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "service_ingress_from_load_balancer" {
+  security_group_id = aws_security_group.app.id
+  description       = "Allow HTTP traffic to application container port"
+
+  from_port                    = var.container_port
+  to_port                      = var.container_port
+  ip_protocol                  = "tcp"
+  referenced_security_group_id = aws_security_group.alb.id
+}
+
+resource "aws_vpc_security_group_ingress_rule" "vpc_endpoints_ingress_from_service" {
+  security_group_id = var.aws_services_security_group_id
+  description       = "Allow inbound requests to VPC endpoints from the service"
+
+  from_port                    = 443
+  to_port                      = 443
+  ip_protocol                  = "tcp"
+  referenced_security_group_id = aws_security_group.app.id
+}
diff --git a/infra/modules/service/outputs.tf b/infra/modules/service/outputs.tf
new file mode 100644
index 00000000..9be53806
--- /dev/null
+++ b/infra/modules/service/outputs.tf
@@ -0,0 +1,26 @@
+output "public_endpoint" {
+  description = "The public endpoint for the service."
+  value       = "http://${aws_lb.alb.dns_name}"
+}
+
+output "cluster_name" {
+  value = aws_ecs_cluster.cluster.name
+}
+
+output "load_balancer_arn_suffix" {
+  description = "The ARN suffix for use with CloudWatch Metrics."
+  value       = aws_lb.alb.arn_suffix
+}
+
+output "application_log_group" {
+  value = local.log_group_name
+}
+
+output "application_log_stream_prefix" {
+  value = local.log_stream_prefix
+}
+
+output "migrator_role_arn" {
+  description = "ARN for role to use for migration"
+  value       = length(aws_iam_role.migrator_task) > 0 ? 
diff --git a/infra/modules/service/outputs.tf b/infra/modules/service/outputs.tf
new file mode 100644
index 00000000..9be53806
--- /dev/null
+++ b/infra/modules/service/outputs.tf
@@ -0,0 +1,26 @@
+output "public_endpoint" {
+  description = "The public endpoint for the service."
+  value       = "http://${aws_lb.alb.dns_name}"
+}
+
+output "cluster_name" {
+  value = aws_ecs_cluster.cluster.name
+}
+
+output "load_balancer_arn_suffix" {
+  description = "The ARN suffix for use with CloudWatch Metrics."
+  value       = aws_lb.alb.arn_suffix
+}
+
+output "application_log_group" {
+  value = local.log_group_name
+}
+
+output "application_log_stream_prefix" {
+  value = local.log_stream_prefix
+}
+
+output "migrator_role_arn" {
+  description = "ARN for the role to use for database migrations"
+  value       = length(aws_iam_role.migrator_task) > 0 ? aws_iam_role.migrator_task[0].arn : null
+}
diff --git a/infra/modules/service/task-scheduler-role.tf b/infra/modules/service/task-scheduler-role.tf
new file mode 100644
index 00000000..d0b4393c
--- /dev/null
+++ b/infra/modules/service/task-scheduler-role.tf
@@ -0,0 +1,57 @@
+#---------------------
+# Task Scheduler Role
+#---------------------
+# Role and policy used by AWS EventBridge to trigger jobs from events
+#
+
+# Role that EventBridge will assume
+# The role allows EventBridge to run tasks on the ECS cluster
+resource "aws_iam_role" "events" {
+  name                = "${local.cluster_name}-events"
+  managed_policy_arns = [aws_iam_policy.run_task.arn]
+  assume_role_policy  = data.aws_iam_policy_document.events_assume_role.json
+}
+
+data "aws_iam_policy_document" "events_assume_role" {
+  statement {
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+    principals {
+      type        = "Service"
+      identifiers = ["events.amazonaws.com"]
+    }
+  }
+}
+
+# Policy that allows running tasks on the ECS cluster
+resource "aws_iam_policy" "run_task" {
+  name   = "${var.service_name}-run-access"
+  policy = data.aws_iam_policy_document.run_task.json
+}
+
+data "aws_iam_policy_document" "run_task" {
+  statement {
+    effect    = "Allow"
+    actions   = ["ecs:RunTask"]
+    resources = ["${aws_ecs_task_definition.app.arn_without_revision}:*"]
+    condition {
+      test     = "ArnLike"
+      variable = "ecs:cluster"
+      values   = [aws_ecs_cluster.cluster.arn]
+    }
+  }
+
+  statement {
+    effect  = "Allow"
+    actions = ["iam:PassRole"]
+    resources = [
+      aws_iam_role.task_executor.arn,
+      aws_iam_role.app_service.arn,
+    ]
+    condition {
+      test     = "StringLike"
+      variable = "iam:PassedToService"
+      values   = ["ecs-tasks.amazonaws.com"]
+    }
+  }
+}
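+
+# A sketch of how this role gets consumed (illustration only; the names below
+# are assumptions, and the actual wiring likely lives in jobs.tf): an
+# EventBridge target that runs the task definition on the cluster would
+# reference it roughly as
+#
+#   resource "aws_cloudwatch_event_target" "job" {
+#     rule     = aws_cloudwatch_event_rule.example.name
+#     arn      = aws_ecs_cluster.cluster.arn
+#     role_arn = aws_iam_role.events.arn
+#
+#     ecs_target {
+#       task_definition_arn = aws_ecs_task_definition.app.arn
+#       launch_type         = "FARGATE"
+#       network_configuration {
+#         subnets = var.private_subnet_ids
+#       }
+#     }
+#   }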
diff --git a/infra/modules/service/variables.tf b/infra/modules/service/variables.tf
new file mode 100644
index 00000000..5346f1cf
--- /dev/null
+++ b/infra/modules/service/variables.tf
@@ -0,0 +1,157 @@
+variable "service_name" {
+  description = "name of the service, to be used for infrastructure resource naming"
+  validation {
+    condition     = can(regex("^[-_\\da-z]+$", var.service_name))
+    error_message = "use only lower case letters, numbers, dashes, and underscores"
+  }
+}
+
+variable "domain_name" {
+  type        = string
+  description = "The fully qualified domain name for the application"
+  default     = null
+}
+
+variable "certificate_arn" {
+  type        = string
+  description = "The ARN of the certificate to use for the application"
+  default     = null
+}
+
+variable "hosted_zone_id" {
+  type        = string
+  description = "The Route53 hosted zone id for the domain"
+  default     = null
+}
+
+variable "image_tag" {
+  type        = string
+  description = "The tag of the image to deploy"
+}
+
+variable "image_repository_name" {
+  type        = string
+  description = "The name of the container image repository"
+}
+
+variable "desired_instance_count" {
+  type        = number
+  description = "Number of instances of the task definition to place and keep running."
+  default     = 1
+}
+
+variable "cpu" {
+  type        = number
+  default     = 256
+  description = "Number of cpu units used by the task, expressed as an integer value, e.g. 512"
+}
+
+variable "memory" {
+  type        = number
+  default     = 512
+  description = "Amount (in MiB) of memory used by the task, e.g. 2048"
+}
+
+variable "enable_command_execution" {
+  type        = bool
+  default     = false
+  description = "Whether the service should enable ECS Exec, such as for debugging"
+}
+
+variable "container_port" {
+  type        = number
+  description = "The port number on the container that's bound to the user-specified or automatically assigned host port"
+  default     = 8000
+}
+
+variable "vpc_id" {
+  type        = string
+  description = "Uniquely identifies the VPC."
+}
+
+variable "public_subnet_ids" {
+  type        = list(any)
+  description = "Public subnet ids in VPC"
+}
+
+variable "private_subnet_ids" {
+  type        = list(any)
+  description = "Private subnet ids in VPC"
+}
+
+variable "aws_services_security_group_id" {
+  type        = string
+  description = "Security group ID for VPC endpoints that access AWS Services"
+}
+
+variable "extra_environment_variables" {
+  type        = map(string)
+  description = "Additional environment variables to pass to the service container. Map from environment variable name to the value."
+  default     = {}
+}
+
+variable "secrets" {
+  type = set(object({
+    name      = string
+    valueFrom = string
+  }))
+  description = "List of configurations for defining environment variables that pull from SSM parameter store"
+  default     = []
+}
+
+variable "db_vars" {
+  description = "Variables for integrating the app service with a database"
+  type = object({
+    security_group_ids         = list(string)
+    app_access_policy_arn      = string
+    migrator_access_policy_arn = string
+    connection_info = object({
+      host        = string
+      port        = string
+      user        = string
+      db_name     = string
+      schema_name = string
+    })
+  })
+  default = null
+}
+
+variable "extra_policies" {
+  description = "Map of extra IAM policies to attach to the service's task role. The map's keys define the resource name in terraform."
+  type        = map(string)
+  default     = {}
+}
+
+variable "file_upload_jobs" {
+  type = map(object({
+    source_bucket = string
+    path_prefix   = string
+    task_command  = list(string)
+  }))
+
+  description = <<-EOT
+    Configurations for jobs that trigger on S3 file uploads. The task command
+    can reference the bucket and key of the file that triggered the job with
+    the template values `<bucket_name>` and `<object_key>`. For example if
+    task_command is:

+      ["python", "etl.py", "<object_key>"]

+    Then if an object was uploaded to s3://somebucket/path/to/file.txt, the
+    task will execute the command:

+      python etl.py path/to/file.txt
+  EOT
+  default     = {}
+}
+
+variable "is_temporary" {
+  description = "Whether the service is meant to be spun up temporarily (e.g. for automated infra tests). This is used to disable deletion protection for the load balancer."
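+  # Typically set by the caller from the workspace name; a sketch (assumption,
+  # not part of this module): is_temporary = startswith(terraform.workspace, "t-")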
+  type = bool
+}
diff --git a/infra/modules/storage/access-control.tf b/infra/modules/storage/access-control.tf
new file mode 100644
index 00000000..ee34e6ac
--- /dev/null
+++ b/infra/modules/storage/access-control.tf
@@ -0,0 +1,65 @@
+# Block public access
+resource "aws_s3_bucket_public_access_block" "storage" {
+  bucket = aws_s3_bucket.storage.id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+# Bucket policy that requires HTTPS
+resource "aws_s3_bucket_policy" "storage" {
+  bucket = aws_s3_bucket.storage.id
+  policy = data.aws_iam_policy_document.storage.json
+}
+
+data "aws_iam_policy_document" "storage" {
+  statement {
+    sid       = "RestrictToTLSRequestsOnly"
+    effect    = "Deny"
+    actions   = ["s3:*"]
+    resources = [aws_s3_bucket.storage.arn]
+    principals {
+      type        = "*"
+      identifiers = ["*"]
+    }
+    condition {
+      test     = "Bool"
+      variable = "aws:SecureTransport"
+      values   = ["false"]
+    }
+  }
+}
+
+# Create policy for read/write access
+# Attach this policy to roles that need access to the bucket
+resource "aws_iam_policy" "storage_access" {
+  name   = "${var.name}-access"
+  policy = data.aws_iam_policy_document.storage_access.json
+}
+
+data "aws_iam_policy_document" "storage_access" {
+  statement {
+    actions = [
+      "s3:DeleteObject",
+      "s3:DeleteObjectTagging",
+      "s3:GetObject",
+      "s3:GetObjectAttributes",
+      "s3:GetObjectTagging",
+      "s3:ListBucket",
+      "s3:PutObject",
+      "s3:PutObjectTagging",
+    ]
+    effect = "Allow"
+    resources = [
+      "arn:aws:s3:::${var.name}",
+      "arn:aws:s3:::${var.name}/*"
+    ]
+  }
+  statement {
+    actions   = ["kms:GenerateDataKey", "kms:Decrypt"]
+    effect    = "Allow"
+    resources = [aws_kms_key.storage.arn]
+  }
+}
diff --git a/infra/modules/storage/encryption.tf b/infra/modules/storage/encryption.tf
new file mode 100644
index 00000000..71c5b58c
--- /dev/null
+++ b/infra/modules/storage/encryption.tf
@@ -0,0 +1,18 @@
+resource "aws_kms_key" "storage" {
+  description = "KMS key for bucket ${var.name}"
+  # The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key.
+  deletion_window_in_days = "10"
+  # Generates new cryptographic material every 365 days; this is used to encrypt your data. The KMS key retains the old material for decryption purposes.
+  enable_key_rotation = "true"
+}
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "storage" {
+  bucket = aws_s3_bucket.storage.id
+  rule {
+    apply_server_side_encryption_by_default {
+      kms_master_key_id = aws_kms_key.storage.arn
+      sse_algorithm     = "aws:kms"
+    }
+    bucket_key_enabled = true
+  }
+}
diff --git a/infra/modules/storage/events.tf b/infra/modules/storage/events.tf
new file mode 100644
index 00000000..80536435
--- /dev/null
+++ b/infra/modules/storage/events.tf
@@ -0,0 +1,7 @@
+# Tell S3 to publish events to EventBridge. Subscribers can then subscribe
+# to these events in an event-based architecture.
+# See https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-mapping-troubleshooting.html
+resource "aws_s3_bucket_notification" "storage" {
+  bucket      = aws_s3_bucket.storage.id
+  eventbridge = true
+}
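+
+# A sketch of a subscriber (assumption; no such rule is created by this
+# module): an EventBridge rule matching uploads to this bucket could use an
+# event pattern like
+#
+#   resource "aws_cloudwatch_event_rule" "object_created" {
+#     event_pattern = jsonencode({
+#       source        = ["aws.s3"]
+#       "detail-type" = ["Object Created"]
+#       detail        = { bucket = { name = [aws_s3_bucket.storage.id] } }
+#     })
+#   }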
diff --git a/infra/modules/storage/lifecycle.tf b/infra/modules/storage/lifecycle.tf
new file mode 100644
index 00000000..174aa42c
--- /dev/null
+++ b/infra/modules/storage/lifecycle.tf
@@ -0,0 +1,11 @@
+resource "aws_s3_bucket_lifecycle_configuration" "storage" {
+  bucket = aws_s3_bucket.storage.id
+
+  rule {
+    id     = "AbortIncompleteUpload"
+    status = "Enabled"
+    abort_incomplete_multipart_upload {
+      days_after_initiation = 7
+    }
+  }
+}
diff --git a/infra/modules/storage/main.tf b/infra/modules/storage/main.tf
new file mode 100644
index 00000000..3ae37226
--- /dev/null
+++ b/infra/modules/storage/main.tf
@@ -0,0 +1,12 @@
+resource "aws_s3_bucket" "storage" {
+  bucket = var.name
+
+  # Use a separate line to support automated terraform destroy commands
+  force_destroy = false
+
+  # checkov:skip=CKV_AWS_18:TODO(https://github.com/navapbc/template-infra/issues/507) Implement access logging
+
+  # checkov:skip=CKV_AWS_144:Cross region replication not required by default
+  # checkov:skip=CKV2_AWS_62:S3 bucket does not need notifications enabled
+  # checkov:skip=CKV_AWS_21:Bucket versioning is not needed
+}
diff --git a/infra/modules/storage/outputs.tf b/infra/modules/storage/outputs.tf
new file mode 100644
index 00000000..8337ee7a
--- /dev/null
+++ b/infra/modules/storage/outputs.tf
@@ -0,0 +1,3 @@
+output "access_policy_arn" {
+  value = aws_iam_policy.storage_access.arn
+}
diff --git a/infra/modules/storage/variables.tf b/infra/modules/storage/variables.tf
new file mode 100644
index 00000000..0a261c96
--- /dev/null
+++ b/infra/modules/storage/variables.tf
@@ -0,0 +1,4 @@
+variable "name" {
+  type        = string
+  description = "Name of the AWS S3 bucket. Needs to be globally unique across all regions."
+}
diff --git a/infra/modules/terraform-backend-s3/README.md b/infra/modules/terraform-backend-s3/README.md
new file mode 100644
index 00000000..29220db5
--- /dev/null
+++ b/infra/modules/terraform-backend-s3/README.md
@@ -0,0 +1,7 @@
+# Terraform S3 backend module
+
+This module creates resources for an [S3 backend for Terraform](https://www.terraform.io/language/settings/backends/s3). It creates the following resources:
+
+* S3 bucket to store [Terraform state files](https://www.terraform.io/language/state)
+* S3 bucket to store [S3 access logs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html)
+* DynamoDB table to manage [terraform state locks](https://www.terraform.io/language/state/locking)
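+
+## Example usage
+
+A minimal sketch of pointing Terraform at the created resources via a backend
+config file (the values below are placeholders; in this project the file is
+generated by `bin/create-tfbackend`):
+
+```terraform
+# dev.s3.tfbackend (hypothetical values)
+bucket         = "<tf-state-bucket-name>"
+key            = "infra/app/service/dev.tfstate"
+dynamodb_table = "<tf-state-bucket-name>-state-locks"
+region         = "us-east-1"
+```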
diff --git a/infra/modules/terraform-backend-s3/main.tf b/infra/modules/terraform-backend-s3/main.tf
new file mode 100644
index 00000000..cebb177f
--- /dev/null
+++ b/infra/modules/terraform-backend-s3/main.tf
@@ -0,0 +1,277 @@
+data "aws_caller_identity" "current" {}
+data "aws_region" "current" {}
+data "aws_partition" "current" {}
+
+locals {
+  tf_state_bucket_name = var.name
+  tf_logs_bucket_name  = "${var.name}-logs"
+  tf_locks_table_name  = "${var.name}-state-locks"
+}
+
+# Create the dynamodb table required for state locking.
+
+# Options for encryption are an AWS owned key, which is not unique to your account; AWS managed;
+# or customer managed. The latter two options are more secure, and customer managed gives control
+# over the key. This allows access to be restricted by the key policy as well as by policies
+# attached to roles or users.
+# https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html
+resource "aws_kms_key" "tf_backend" {
+  description = "KMS key for DynamoDB table ${local.tf_locks_table_name}"
+  # The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key.
+  deletion_window_in_days = "10"
+  # Generates new cryptographic material every 365 days; this is used to encrypt your data. The KMS key retains the old material for decryption purposes.
+  enable_key_rotation = "true"
+}
+
+resource "aws_dynamodb_table" "terraform_lock" {
+  name         = local.tf_locks_table_name
+  hash_key     = "LockID"
+  billing_mode = "PAY_PER_REQUEST"
+
+  attribute {
+    name = "LockID"
+    type = "S"
+  }
+
+  server_side_encryption {
+    enabled     = true
+    kms_key_arn = aws_kms_key.tf_backend.arn
+  }
+
+  point_in_time_recovery {
+    enabled = true
+  }
+
+}
+
+# Create the S3 bucket used to store terraform state remotely.
+resource "aws_s3_bucket" "tf_state" {
+  bucket = local.tf_state_bucket_name
+
+  # checkov:skip=CKV_AWS_144:Cross region replication not required by default
+  # checkov:skip=CKV2_AWS_62:S3 bucket does not need notifications enabled
+  # checkov:skip=CKV2_AWS_61:No need to define S3 bucket lifecycle configuration to expire or transition tfstate files since they will always be needed and the file sizes are small anyways
+
+  # Prevent accidental destruction by a developer executing terraform destroy in the wrong directory. Contains terraform state files.
+  lifecycle {
+    # Use a separate line to support automated terraform destroy commands
+    prevent_destroy = true
+  }
+}
+
+resource "aws_s3_bucket_versioning" "tf_state" {
+  bucket = aws_s3_bucket.tf_state.id
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "tf_state" {
+  bucket = aws_s3_bucket.tf_state.id
+  rule {
+    apply_server_side_encryption_by_default {
+      kms_master_key_id = aws_kms_key.tf_backend.arn
+      sse_algorithm     = "aws:kms"
+    }
+    bucket_key_enabled = true
+  }
+}
+
+resource "aws_s3_bucket_public_access_block" "tf_state" {
+  bucket = aws_s3_bucket.tf_state.id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+resource "aws_s3_bucket_ownership_controls" "tf_state" {
+  bucket = aws_s3_bucket.tf_state.id
+
+  rule {
+    object_ownership = "BucketOwnerEnforced"
+  }
+}
+
+data "aws_iam_policy_document" "tf_state" {
+  statement {
+    sid = "RequireTLS"
+    principals {
+      type        = "AWS"
+      identifiers = ["*"]
+    }
+    actions = [
+      "s3:*",
+    ]
+
+    resources = [
+      aws_s3_bucket.tf_state.arn,
+      "${aws_s3_bucket.tf_state.arn}/*"
+    ]
+
+    effect = "Deny"
+
+    condition {
+      test     = "Bool"
+      variable = "aws:SecureTransport"
+
+      values = [
+        false
+      ]
+    }
+  }
+}
+
+resource "aws_s3_bucket_policy" "tf_state" {
+  bucket = aws_s3_bucket.tf_state.id
+  policy = data.aws_iam_policy_document.tf_state.json
+}
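+
+# With versioning enabled above, an earlier revision of a state file can be
+# recovered if it is ever corrupted or deleted, e.g. (sketch, assuming the AWS
+# CLI and a hypothetical state key of infra/networks/dev.tfstate):
+#
+#   aws s3api list-object-versions \
+#     --bucket <tf-state-bucket-name> --prefix infra/networks/dev.tfstate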
+# Create the S3 bucket to provide server access logging.
+#
+# Ignore the bucket logging compliance check for this bucket since
+# the bucket is used for logging only and doesn't need server access logging itself
+# (see https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html)
+# tfsec:ignore:aws-s3-enable-bucket-logging
+resource "aws_s3_bucket" "tf_log" {
+  bucket = local.tf_logs_bucket_name
+
+  # checkov:skip=CKV_AWS_144:Cross region replication not required by default
+  # checkov:skip=CKV2_AWS_62:S3 bucket does not need notifications enabled
+}
+
+resource "aws_s3_bucket_versioning" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+  rule {
+    apply_server_side_encryption_by_default {
+      kms_master_key_id = aws_kms_key.tf_backend.arn
+      sse_algorithm     = "aws:kms"
+    }
+    bucket_key_enabled = true
+  }
+}
+
+resource "aws_s3_bucket_public_access_block" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+resource "aws_s3_bucket_ownership_controls" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+
+  rule {
+    object_ownership = "BucketOwnerEnforced"
+  }
+}
+
+# Move all log data to lower cost infrequent-access storage after 30 days.
+resource "aws_s3_bucket_lifecycle_configuration" "tf_log" {
+  bucket                = aws_s3_bucket.tf_log.id
+  expected_bucket_owner = data.aws_caller_identity.current.account_id
+
+  rule {
+    id     = "move-s3-to-ia"
+    status = "Enabled"
+
+    abort_incomplete_multipart_upload {
+      days_after_initiation = 15
+    }
+
+    transition {
+      days          = 30
+      storage_class = "STANDARD_IA"
+    }
+
+    noncurrent_version_transition {
+      noncurrent_days = 30
+      storage_class   = "STANDARD_IA"
+    }
+  }
+}
+
+data "aws_iam_policy_document" "tf_log" {
+  statement {
+    sid = "RequireTLS"
+    principals {
+      type        = "AWS"
+      identifiers = ["*"]
+    }
+    actions = [
+      "s3:*",
+    ]
+
+    resources = [
+      aws_s3_bucket.tf_log.arn,
+      "${aws_s3_bucket.tf_log.arn}/*"
+    ]
+
+    effect = "Deny"
+
+    condition {
+      test     = "Bool"
+      variable = "aws:SecureTransport"
+
+      values = [
+        false
+      ]
+    }
+  }
+  statement {
+    sid = "S3ServerAccessLogsPolicy"
+    principals {
+      type = "Service"
+      identifiers = [
+        "logging.s3.amazonaws.com"
+      ]
+    }
+    actions = [
+      "s3:PutObject",
+    ]
+
+    resources = [
+      "${aws_s3_bucket.tf_log.arn}/*"
+    ]
+
+    effect = "Allow"
+
+    condition {
+      test     = "ArnLike"
+      variable = "aws:SourceArn"
+
+      values = [
+        "arn:${data.aws_partition.current.id}:s3:::${aws_s3_bucket.tf_log.id}"
+      ]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:SourceAccount"
+
+      values = [
+        data.aws_caller_identity.current.account_id
+      ]
+    }
+  }
+}
+
+resource "aws_s3_bucket_policy" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+  policy = data.aws_iam_policy_document.tf_log.json
+}
+
+resource "aws_s3_bucket_logging" "tf_state" {
+  bucket = aws_s3_bucket.tf_state.id
+
+  target_bucket = aws_s3_bucket.tf_log.id
+  target_prefix = "logs/${aws_s3_bucket.tf_state.bucket}/"
+}
diff --git a/infra/modules/terraform-backend-s3/outputs.tf b/infra/modules/terraform-backend-s3/outputs.tf
new file mode 100644
index 00000000..8ce14d4a
--- /dev/null
+++ b/infra/modules/terraform-backend-s3/outputs.tf
@@ -0,0 +1,11 @@
+output "tf_state_bucket_name" {
+  value = aws_s3_bucket.tf_state.bucket
+}
+
+output "tf_log_bucket_name" {
+  value = aws_s3_bucket.tf_log.bucket
+}
+
+output "tf_locks_table_name" {
+  value = aws_dynamodb_table.terraform_lock.name
+}
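+
+# These outputs can be read back after apply to assemble a backend config
+# file, e.g. (sketch): terraform output -raw tf_state_bucket_name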
diff --git a/infra/modules/terraform-backend-s3/variables.tf b/infra/modules/terraform-backend-s3/variables.tf
new file mode 100644
index 00000000..5918083e
--- /dev/null
+++ b/infra/modules/terraform-backend-s3/variables.tf
@@ -0,0 +1,4 @@
+variable "name" {
+  type        = string
+  description = "The name of the backend resource. This will be used to prefix the names of the other backend resources."
+}
diff --git a/infra/networks/main.tf b/infra/networks/main.tf
new file mode 100644
index 00000000..de966c20
--- /dev/null
+++ b/infra/networks/main.tf
@@ -0,0 +1,86 @@
+locals {
+  tags = merge(module.project_config.default_tags, {
+    network_name = var.network_name
+    description  = "VPC resources"
+  })
+  region = module.project_config.default_region
+
+  network_config = module.project_config.network_configs[var.network_name]
+  domain_config  = local.network_config.domain_config
+
+  # List of configurations for all applications, even ones that are not in the current network
+  # If the project has multiple applications, add the other app configs to this list
+  app_configs = [module.app_config]
+
+  # List of configurations for applications that are in the current network
+  # An application is in the current network if at least one of its environments
+  # is mapped to the network
+  apps_in_network = [
+    for app in local.app_configs :
+    app
+    if anytrue([
+      for environment_config in app.environment_configs : true if environment_config.network_name == var.network_name
+    ])
+  ]
+
+  # Whether any of the applications in the network have a database
+  has_database = anytrue([for app in local.apps_in_network : app.has_database])
+
+  # Whether any of the applications in the network have dependencies on an external non-AWS service
+  has_external_non_aws_service = anytrue([for app in local.apps_in_network : app.has_external_non_aws_service])
+
+  # Whether any of the applications in the network have an environment that needs container execution access
+  enable_command_execution = anytrue([
+    for app in local.apps_in_network :
+    anytrue([
+      for environment_config in app.environment_configs : true if environment_config.service_config.enable_command_execution == true && environment_config.network_name == var.network_name
+    ])
+  ])
+}
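+
+# Worked example of the filtering above (hypothetical values): if
+# module.app_config defines environment_configs with
+# dev = { network_name = "dev", ... } and prod = { network_name = "prod", ... }
+# and var.network_name is "dev", then apps_in_network keeps the app because
+# anytrue sees true for the dev environment, and has_database then reflects
+# that app's has_database flag.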
+terraform {
+  required_version = "~>1.8.0"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~>5.6.0"
+    }
+  }
+
+  backend "s3" {
+    encrypt = "true"
+  }
+}
+
+provider "aws" {
+  region = local.region
+  default_tags {
+    tags = local.tags
+  }
+}
+
+module "project_config" {
+  source = "../project-config"
+}
+
+module "app_config" {
+  source = "../app/app-config"
+}
+
+module "network" {
+  source                                  = "../modules/network"
+  name                                    = var.network_name
+  aws_services_security_group_name_prefix = module.project_config.aws_services_security_group_name_prefix
+  database_subnet_group_name              = local.network_config.database_subnet_group_name
+  has_database                            = local.has_database
+  has_external_non_aws_service            = local.has_external_non_aws_service
+  enable_command_execution                = local.enable_command_execution
+}
+
+module "domain" {
+  source              = "../modules/domain"
+  name                = local.domain_config.hosted_zone
+  manage_dns          = local.domain_config.manage_dns
+  certificate_configs = local.domain_config.certificate_configs
+}
diff --git a/infra/networks/outputs.tf b/infra/networks/outputs.tf
new file mode 100644
index 00000000..4c916802
--- /dev/null
+++ b/infra/networks/outputs.tf
@@ -0,0 +1,11 @@
+output "hosted_zone_name_servers" {
+  value = module.domain.hosted_zone_name_servers
+}
+
+output "certificate_domains" {
+  value = keys(local.domain_config.certificate_configs)
+}
+
+output "certificate_arns" {
+  value = module.domain.certificate_arns
+}
diff --git a/infra/networks/variables.tf b/infra/networks/variables.tf
new file mode 100644
index 00000000..0182a92d
--- /dev/null
+++ b/infra/networks/variables.tf
@@ -0,0 +1,4 @@
+variable "network_name" {
+  type        = string
+  description = "Human readable identifier for the VPC"
+}
diff --git a/infra/project-config/README.md b/infra/project-config/README.md
new file mode 100644
index 00000000..cc1d4a77
--- /dev/null
+++ b/infra/project-config/README.md
@@ -0,0 +1,19 @@
+# Project config module
+
+The purpose of this module is to contain environment-agnostic items, e.g. tags that are common to all environments.
+
+## Usage
+
+```terraform
+# Import the project config module
+
+module "project_config" {
+  source = "../../project-config"
+}
+
+# Combine common tags with environment specific tags.
+tags = merge(module.project_config.default_tags, {
+  environment = "dev"
+  description = "Backend resources required for terraform state management."
+})
+```
diff --git a/infra/project-config/aws-services.tf b/infra/project-config/aws-services.tf
new file mode 100644
index 00000000..d1cd72d8
--- /dev/null
+++ b/infra/project-config/aws-services.tf
@@ -0,0 +1,35 @@
+locals {
+  aws_services = [
+    "acm",
+    "apigateway",
+    "application-autoscaling",
+    "autoscaling",
+    "cloudwatch",
+    "cognito-idp",
+    "dynamodb",
+    "ec2",
+    "ecr",
+    "ecs",
+    "elasticbeanstalk",
+    "elasticloadbalancing",
+    "events",
+    "evidently",
+    "iam",
+    "kms",
+    "lambda",
+    "logs",
+    "pipes",
+    "rds",
+    "route53",
+    "route53domains",
+    "s3",
+    "scheduler",
+    "schemas",
+    "secretsmanager",
+    "servicediscovery",
+    "sns",
+    "ssm",
+    "waf-regional",
+    "wafv2",
+  ]
+}
diff --git a/infra/project-config/main.tf b/infra/project-config/main.tf
new file mode 100644
index 00000000..e09c5d58
--- /dev/null
+++ b/infra/project-config/main.tf
@@ -0,0 +1,20 @@
+locals {
+  # Machine readable project name (lower case letters, dashes, and underscores)
+  # This will be used in names of AWS resources
+  project_name = "decision-support-tool"
+
+  # Project owner (e.g. navapbc). Used for tagging infra resources.
+  owner = "navalabs"
+
+  # URL of project source code repository
+  code_repository_url = "https://github.com/navapbc/labs-decision-support-tool"
+
+  # Default AWS region for the project (e.g. us-east-1, us-east-2, us-west-1).
+  # This depends on where your project is located (if regional);
+  # otherwise us-east-1 is a good default.
+  default_region = "us-east-1"
+
+  github_actions_role_name = "${local.project_name}-github-actions"
+
+  aws_services_security_group_name_prefix = "aws-service-vpc-endpoints"
+}
diff --git a/infra/project-config/networks.tf b/infra/project-config/networks.tf
new file mode 100644
index 00000000..0b3a98bd
--- /dev/null
+++ b/infra/project-config/networks.tf
@@ -0,0 +1,55 @@
+locals {
+  network_configs = {
+    dev = {
+      database_subnet_group_name = "dev"
+
+      domain_config = {
+        manage_dns = true
+        # Placeholder value for the hosted zone
+        # A hosted zone represents a domain and all of its subdomains. For example, a
+        # hosted zone of foo.domain.com includes foo.domain.com, bar.foo.domain.com, etc.
+        hosted_zone = "decision-support-tool-dev.navateam.com"
+
+        certificate_configs = {
+          # Example certificate configuration for a certificate that is managed by the project
+          # "sub.domain.com" = {
+          #   source = "issued"
+          # }
+          "decision-support-tool-dev.navateam.com" = {
+            source = "issued"
+          }
+
+          # Example certificate configuration for a certificate that is issued elsewhere and imported into the project
+          # (currently not supported, will be supported via https://github.com/navapbc/template-infra/issues/559)
+          # "platform-test-dev.navateam.com" = {
+          #   source                    = "imported"
+          #   private_key_ssm_name      = "/certificates/sub.domain.com/private-key"
+          #   certificate_body_ssm_name = "/certificates/sub.domain.com/certificate-body"
+          # }
+        }
+      }
+    }
+
+    staging = {
+      database_subnet_group_name = "staging"
+
+      domain_config = {
+        manage_dns  = true
+        hosted_zone = "hosted.zone.for.staging.network.com"
+
+        certificate_configs = {}
+      }
+    }
+
+    prod = {
+      database_subnet_group_name = "prod"
+
+      domain_config = {
+        manage_dns  = true
+        hosted_zone = "hosted.zone.for.prod.network.com"
+
+        certificate_configs = {}
+      }
+    }
+  }
+}
diff --git a/infra/project-config/outputs.tf b/infra/project-config/outputs.tf
new file mode 100644
index 00000000..d82461cb
--- /dev/null
+++ b/infra/project-config/outputs.tf
@@ -0,0 +1,49 @@
+output "project_name" {
+  value = local.project_name
+}
+
+output "owner" {
+  value = local.owner
+}
+
+output "code_repository_url" {
+  value = local.code_repository_url
+}
+
+output "code_repository" {
+  value       = regex("([-_\\w]+/[-_\\w]+)(\\.git)?$", local.code_repository_url)[0]
+  description = "The 'org/repo' string of the repo (e.g. 'navapbc/template-infra'). This is extracted from the repo URL (e.g. 'git@github.com:navapbc/template-infra.git' or 'https://github.com/navapbc/template-infra.git')"
+}
+
+output "default_region" {
+  value = local.default_region
+}
+
+# Common tags for all accounts and environments
+output "default_tags" {
+  value = {
+    project             = local.project_name
+    owner               = local.owner
+    repository          = local.code_repository_url
+    terraform           = true
+    terraform_workspace = terraform.workspace
+    # The description tag is set in each environment's locals; use the key project_description if required.
+ } +} + +output "github_actions_role_name" { + value = local.github_actions_role_name +} + +output "aws_services" { + description = "AWS services that this project uses" + value = local.aws_services +} + +output "aws_services_security_group_name_prefix" { + value = local.aws_services_security_group_name_prefix +} + +output "network_configs" { + value = local.network_configs +} diff --git a/infra/test/go.mod b/infra/test/go.mod new file mode 100644 index 00000000..f324ebbd --- /dev/null +++ b/infra/test/go.mod @@ -0,0 +1,59 @@ +module navapbc/template-infra + +go 1.19 + +require ( + github.com/gruntwork-io/terratest v0.41.0 + github.com/stretchr/testify v1.8.1 +) + +require ( + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v0.13.0 // indirect + cloud.google.com/go/storage v1.28.1 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/aws/aws-sdk-go v1.44.122 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.7.0 // indirect + github.com/hashicorp/go-multierror v1.1.0 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.9.1 // indirect + github.com/hashicorp/terraform-json v0.13.0 // indirect + github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/compress v1.15.11 // indirect + github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/tmccombs/hcl2json v0.3.3 // indirect + github.com/ulikunitz/xz v0.5.10 // indirect + github.com/zclconf/go-cty v1.9.1 // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.114.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.3 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/infra/test/go.sum b/infra/test/go.sum new file mode 100644 index 00000000..52322c6a --- /dev/null +++ b/infra/test/go.sum @@ -0,0 +1,968 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= 
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod 
h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod 
h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod 
h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= 
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M=
+github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/gruntwork-io/terratest v0.41.0 h1:QKFK6m0EMVnrV7lw2L06TlG+Ha3t0CcOXuBVywpeNRU=
+github.com/gruntwork-io/terratest v0.41.0/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY=
+github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
+github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl/v2 v2.9.1 h1:eOy4gREY0/ZQHNItlfuEZqtcQbXIxzojlP301hDpnac=
+github.com/hashicorp/hcl/v2 v2.9.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg=
+github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY=
+github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o=
+github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=
+github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ=
+github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w=
+github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
+github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
+github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
+github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty v1.8.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty v1.9.1 h1:viqrgQwFl5UpSxc046qblj78wZXVDFnSOufaOTER+cc=
+github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/infra/test/helpers.go b/infra/test/helpers.go
new file mode 100644
index 00000000..2c3c3a32
--- /dev/null
+++ b/infra/test/helpers.go
@@ -0,0 +1,20 @@
+// Common functions used by test files
+package test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/gruntwork-io/terratest/modules/terraform"
+)
+
+// TerraformInit wraps `terraform init`, passing the given backend config file.
+// Terratest does not currently support passing a file as the -backend-config option,
+// so we call terraform directly rather than using terraform.Init.
+// See https://github.com/gruntwork-io/terratest/issues/517.
+// https://github.com/gruntwork-io/terratest/pull/558 would add support for this; once merged,
+// we could set BackendConfig: map[string]interface{}{"dev.s3.tfbackend": terraform.KeyOnly}
+// in terraformOptions and replace the call to terraform.RunTerraformCommand with terraform.Init.
+func TerraformInit(t *testing.T, terraformOptions *terraform.Options, backendConfig string) {
+	terraform.RunTerraformCommand(t, terraformOptions, "init", fmt.Sprintf("-backend-config=%s", backendConfig))
+}
diff --git a/infra/test/infra_test.go b/infra/test/infra_test.go
new file mode 100644
index 00000000..8f63ced9
--- /dev/null
+++ b/infra/test/infra_test.go
@@ -0,0 +1,152 @@
+package test
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	http_helper "github.com/gruntwork-io/terratest/modules/http-helper"
+	"github.com/gruntwork-io/terratest/modules/random"
+	"github.com/gruntwork-io/terratest/modules/shell"
+	"github.com/gruntwork-io/terratest/modules/terraform"
+	"github.com/stretchr/testify/require"
+)
+
+var uniqueId = strings.ToLower(random.UniqueId())
+var workspaceName = fmt.Sprintf("t-%s", uniqueId)
+
+func TestService(t *testing.T) {
+	BuildAndPublish(t)
+
+	imageTag := shell.RunCommandAndGetOutput(t, shell.Command{
+		Command:    "git",
+		Args:       []string{"rev-parse", "HEAD"},
+		WorkingDir: "./",
+	})
+	terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{
+		Reconfigure:  true,
+		TerraformDir: "../app/service/",
+		Vars: map[string]interface{}{
+			"environment_name": "dev",
+			"image_tag":        imageTag,
+		},
+	})
+
+	fmt.Println("::group::Initialize service module")
+	TerraformInit(t, terraformOptions, "dev.s3.tfbackend")
+	fmt.Println("::endgroup::")
+
+	defer terraform.WorkspaceDelete(t, terraformOptions, workspaceName)
+	fmt.Println("::group::Select new terraform workspace")
+	terraform.WorkspaceSelectOrNew(t, terraformOptions, workspaceName)
+	fmt.Println("::endgroup::")
+
+	defer DestroyService(t, terraformOptions)
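+	// Note: deferred calls run last-in-first-out, so on test exit
+	// DestroyService tears down the service layer first, and only then
+	// does WorkspaceDelete remove the temporary workspace.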
+	fmt.Println("::group::Create service layer")
+	terraform.Apply(t, terraformOptions)
+	fmt.Println("::endgroup::")
+
+	WaitForServiceToBeStable(t, workspaceName)
+	RunEndToEndTests(t, terraformOptions)
+}
+
+func BuildAndPublish(t *testing.T) {
+	fmt.Println("::group::Initialize build-repository module")
+	// See TerraformInit in helpers.go for why we call terraform directly
+	// with -backend-config rather than using terraform.Init.
+	TerraformInit(t, &terraform.Options{
+		TerraformDir: "../app/build-repository/",
+	}, "shared.s3.tfbackend")
+	fmt.Println("::endgroup::")
+
+	fmt.Println("::group::Build release")
+	shell.RunCommand(t, shell.Command{
+		Command:    "make",
+		Args:       []string{"release-build", "APP_NAME=app"},
+		WorkingDir: "../../",
+	})
+	fmt.Println("::endgroup::")
+
+	fmt.Println("::group::Publish release")
+	shell.RunCommand(t, shell.Command{
+		Command:    "make",
+		Args:       []string{"release-publish", "APP_NAME=app"},
+		WorkingDir: "../../",
+	})
+	fmt.Println("::endgroup::")
+}
+
+func WaitForServiceToBeStable(t *testing.T, workspaceName string) {
+	fmt.Println("::group::Wait for service to be stable")
+	appName := "app"
+	environmentName := "dev"
+	serviceName := fmt.Sprintf("%s-%s-%s", workspaceName, appName, environmentName)
+	shell.RunCommand(t, shell.Command{
+		Command:    "aws",
+		Args:       []string{"ecs", "wait", "services-stable", "--cluster", serviceName, "--services", serviceName},
+		WorkingDir: "../../",
+	})
+	fmt.Println("::endgroup::")
+}
+
+func RunEndToEndTests(t *testing.T, terraformOptions *terraform.Options) {
+	fmt.Println("::group::Check service for healthy status 200")
+	serviceEndpoint := terraform.Output(t, terraformOptions, "service_endpoint")
+	http_helper.HttpGetWithRetryWithCustomValidation(t, serviceEndpoint, nil, 5, 1*time.Second, func(responseStatus int, responseBody string) bool {
+		return responseStatus == 200
+	})
+	fmt.Println("::endgroup::")
+}
+
+func EnableDestroyService(t *testing.T, terraformOptions *terraform.Options) {
+	fmt.Println("::group::Set force_destroy = true and prevent_destroy = false for s3 buckets in service layer")
+	shell.RunCommand(t, shell.Command{
+		Command: "sed",
+		Args: []string{
+			"-i.bak",
+			"s/force_destroy = false/force_destroy = true/g",
+			"infra/modules/service/access-logs.tf",
+		},
+		WorkingDir: "../../",
+	})
+	shell.RunCommand(t, shell.Command{
+		Command: "sed",
+		Args: []string{
+			"-i.bak",
+			"s/prevent_destroy = true/prevent_destroy = false/g",
+			"infra/modules/service/access-logs.tf",
+		},
+		WorkingDir: "../../",
+	})
+	shell.RunCommand(t, shell.Command{
+		Command: "sed",
+		Args: []string{
+			"-i.bak",
+			"s/force_destroy = false/force_destroy = true/g",
+			"infra/modules/storage/main.tf",
+		},
+		WorkingDir: "../../",
+	})
+
+	// Clone the options and set targets to only apply to the buckets
+	terraformOptions, err := terraformOptions.Clone()
+	require.NoError(t, err)
+	terraformOptions.Targets = []string{
+		"module.service.aws_s3_bucket.access_logs",
+		"module.storage.aws_s3_bucket.storage",
+	}
+	terraform.Apply(t, terraformOptions)
+	fmt.Println("::endgroup::")
+}
+
+func DestroyService(t *testing.T, terraformOptions *terraform.Options) {
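+	// EnableDestroyService must run before terraform destroy: the S3 buckets
+	// are created with force_destroy = false and prevent_destroy = true, so
+	// the destroy would otherwise fail until those guards are flipped.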
+	EnableDestroyService(t, terraformOptions)
+	fmt.Println("::group::Destroy service layer")
+	terraform.Destroy(t, terraformOptions)
+	fmt.Println("::endgroup::")
+}
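+
+// Example local invocation (hypothetical; assumes AWS credentials for the dev
+// account and that dev.s3.tfbackend / shared.s3.tfbackend configs exist):
+//
+//	cd infra/test
+//	go test -v -run TestService -timeout 60m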