diff --git a/.github/renovate.json b/.github/renovate.json index 8af344fa..1e7c11fd 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -1,22 +1,56 @@ { "extends": [ "config:base", - ":semanticCommitTypeAll(chore)" + ":semanticCommitTypeAll(chore)", + ":disableDependencyDashboard" + ], + "ignorePresets": [ + ":semanticPrefixFixDepsChoreOthers" ], - "ignorePresets": [":semanticPrefixFixDepsChoreOthers"], "prConcurrentLimit": 0, - "rebaseStalePrs": true, + "rebaseWhen": "behind-base-branch", "dependencyDashboard": true, - "semanticCommits": true, + "semanticCommits": "enabled", "postUpdateOptions": [ "gomodTidy" ], - "ignoreDeps": [ - "golang.org/x/net" + "packageRules": [ + { + "description": "Disable MAJOR update types", + "matchUpdateTypes": [ + "major" + ], + "enabled": false + }, + { + "matchUpdateTypes": ["minor", "patch", "pin", "digest"], + "automerge": true, + "automergeType": "pr", + "automergeStrategy": "rebase" + } ], "force": { "constraints": { "go": "1.20" } - } + }, + "regexManagers": [ + { + "fileMatch": ["^Makefile$"], + "matchStrings": [ + "(?<varName>.*?_VERSION)\\s*=\\s*(?<currentValue>.*?)\\s*# renovate datasource=(?<datasource>.*?) 
depName=(?<depName>.*?)\\n" + ], + "depNameTemplate": "{{{depName}}}", + "datasourceTemplate": "{{{datasource}}}", + "versioningTemplate": "semver" + }, + { + "fileMatch": ["podspec_updates.go$"], + "matchStrings": [ + "DefaultProxyImage = \"(?<depName>gcr.io/cloud-sql-connectors/cloud-sql-proxy):(?<currentValue>.*?)\"\\n" + ], + "depNameTemplate": "{{{depName}}}", + "datasourceTemplate": "docker" + } + ] } diff --git a/.github/workflows/release-please-updates.yaml b/.github/workflows/release-please-updates.yaml index c1091854..ca78add3 100644 --- a/.github/workflows/release-please-updates.yaml +++ b/.github/workflows/release-please-updates.yaml @@ -29,7 +29,7 @@ jobs: - name: Checkout code uses: actions/checkout@v3 with: - ref: ${{ github.event.pull_request.head.ref }} + ref: ${{ github.event.pull_request.head.sha }} repository: ${{ github.event.pull_request.head.repo.full_name }} - name: Generate code and commit differences run: tools/release-pr-generate.sh diff --git a/.github/workflows/build.yaml b/.github/workflows/tests.yaml similarity index 56% rename from .github/workflows/build.yaml rename to .github/workflows/tests.yaml index bcfd6b2c..b60e7df8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/tests.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: build +name: tests on: pull_request: pull_request_target: types: [labeled] jobs: - build: + unit: if: "${{ github.event.action != 'labeled' || github.event.label.name == 'tests: run' }}" - name: build and unit test + name: unit tests runs-on: ubuntu-latest steps: - name: Remove PR Label @@ -54,3 +54,45 @@ jobs: run: "make generate \nif ! 
git diff --exit-code --quiet ; then \n echo\n echo\n git diff --stat \"HEAD\"\n echo\n echo\n echo 'ERROR: Lint tools caused changes to the working dir. '\n exit 1\nfi\n" - name: make test run: make test + e2e: + needs: unit + if: "${{ github.event.action == 'labeled' && github.event.label.name == 'tests: run' }}" + name: e2e tests + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + repository: ${{ github.event.pull_request.head.repo.full_name }} + - id: 'auth' + name: 'Authenticate to Google Cloud' + uses: 'google-github-actions/auth@v1.0.0' + with: + workload_identity_provider: ${{ secrets.PROVIDER_NAME }} + service_account: ${{ secrets.SERVICE_ACCOUNT }} + access_token_lifetime: 600s + project_id: ${{ secrets.GOOGLE_CLOUD_PROJECT }} + create_credentials_file: true + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v1' + - name: 'Setup Go' + uses: actions/setup-go@v3 + with: + go-version: '1.20' + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - id: 'e2e' + name: 'Run E2E Tests' + run: "./tools/e2e_test_job.sh" + env: + ENVIRONMENT_NAME: "ci-pr" + NODEPOOL_SERVICEACCOUNT_EMAIL: "${{secrets.NODEPOOL_SERVICEACCOUNT_EMAIL}}" + WORKLOAD_ID_SERVICEACCOUNT_EMAIL: "${{secrets.WORKLOAD_ID_SERVICEACCOUNT_EMAIL}}" + TFSTATE_STORAGE_BUCKET: "${{secrets.TFSTATE_STORAGE_BUCKET}}" + E2E_PROJECT_ID: "${{secrets.GOOGLE_CLOUD_PROJECT}}" diff --git a/.gitignore b/.gitignore index cec8772d..2a48b95b 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,7 @@ testbin/* # Ignore secrets for the build /build.env + +# Ignore downloaded 3rd party license files +ThirdPartyLicenses + diff --git a/.golangci.yml b/.golangci.yml index 531b9240..8fed55f4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,9 +17,15 @@ linters: enable: - goimports - 
revive + - importas issues: exclude-use-default: false linters-settings: + importas: + no-unaliased: true + alias: + - pkg: "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + alias: "cloudsqlapi" revive: rules: - name: blank-imports diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d0d55e9..a1c2cd13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## [0.4.0](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/compare/v0.3.0...v0.4.0) (2023-03-28) + + +### ⚠ BREAKING CHANGES + +* Move to v1 for the AuthProxyWorkload api version. ([#258](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/258)) + +### Features + +* Add configuration for the admin api port and debug. ([#213](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/213)) ([0ddd681](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/0ddd681407f2c2e2e8a6932e90ffda92ddc298c6)) +* Automatically update default images on operator startup. ([#254](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/254)) ([2453be6](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/2453be626a30eef930ca136c941b6a3cbb9cbe99)), closes [#87](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/87) +* Configure --disable-metrics and --disable-telemetry flags. ([#222](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/222)) ([5be6c3b](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/5be6c3b5a15f0bba773b9f007d855581d84284b8)) +* Configure --quota-project flag on the proxy. 
([#225](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/225)) ([c3b4f1b](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/c3b4f1bac3b958c40cc9af2816a017c75c8eb006)), closes [#45](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/45) +* Configure Google Cloud Telemetry flags on the proxy. ([#223](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/223)) ([76b0f39](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/76b0f39b4b9a677446fd3a3948e345bfaad1c432)) +* Configure prometheus flags on the proxy. ([#224](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/224)) ([a055d3b](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/a055d3b79de5ac54ecf2af82b2bbdebcb1551307)) +* Move to v1 for the AuthProxyWorkload api version. ([#258](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/258)) ([7b65d5c](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/7b65d5cbb6ca3279d0dd71c95b439104a1e5b8ca)) +* Updating the RolloutStrategy field is not allowed. ([#212](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/212)) ([f31b637](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/f31b637373b073357431aec0d1b4176507e9a00c)) +* Validate instance fields ([#221](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/221)) ([d516cc2](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/d516cc2661b3854a69e267daafe68f4c1c3b73ad)), closes [#36](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/36) +* When the operator's default proxy image changes, workload containers should be updated. 
([#253](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/253)) ([220c855](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/220c8555d42fbc4f82e029ce1edd46c7a92648af)) + + +### Bug Fixes + +* Only process owner references for known kinds of owners. ([#245](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/245)) ([12be1dc](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/12be1dc2d6bf40f987200dfdec761e7b121b00c1)), closes [#244](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/244) +* Repair a bad merge of tool versions in the Makefile. ([#249](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/249)) ([f2ba903](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/f2ba903f91956ed4b2b923c7adcb846ca9162a26)) +* Validate the AdminServer.EnableAPIs[] field properly. ([#263](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/263)) ([115ac32](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/115ac329a10cf445ccf59c89e965db6c92ce5831)) + + +### Miscellaneous Chores + +* Prepare release 0.4.0 ([2e6a6ad](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/2e6a6adcd92dce926e4ccaa1f15d2386b5b30721)) + ## [0.3.0](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/compare/v0.2.0...v0.3.0) (2023-02-21) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..46b2a08e --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. 
+ +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..6272489d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. 
+ +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to <https://cla.developers.google.com/> to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code Reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google/conduct/). diff --git a/Dockerfile-operator b/Dockerfile-operator index abbacd9a..c98c6bed 100644 --- a/Dockerfile-operator +++ b/Dockerfile-operator @@ -39,4 +39,6 @@ ARG TARGETARCH WORKDIR / USER 65532:65532 COPY --from=build --chown=nonroot "/work/bin/manager" "/manager" +COPY "./ThirdPartyLicenses" "/ThirdPartyLicenses" +COPY "./LICENSE" "/LICENSE" ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile index a7ac5d6e..d668d39e 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec ## The version to use for the cert-manager operator -CERT_MANAGER_VERSION=v1.9.1 +CERT_MANAGER_VERSION=v1.11.0# renovate datasource=github-tags depName=cert-manager/cert-manager ##@ General @@ -86,7 +86,7 @@ install_tools: remove_tools all_tools ## Installs all development tools @echo "TIME: $(shell date) end install tools" .PHONY: generate -generate: ctrl_generate ctrl_manifests go_lint tf_lint installer reset_image add_copyright_header go_fmt yaml_fmt ## Runs code generation, format, and validation tools +generate: 
ctrl_generate ctrl_manifests generate_crd_docs go_lint tf_lint installer reset_image add_copyright_header go_fmt yaml_fmt license_check license_save ## Runs code generation, format, and validation tools @echo "TIME: $(shell date) end make generate" .PHONY: build @@ -135,8 +135,14 @@ add_copyright_header: # Add the copyright header update_version_in_docs: # Fix version numbers that appear in the markdown documentation # Update links to the install script find . -name '*.md' | xargs sed -i.bak -E 's|storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/[^/]+/install.sh|storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v$(VERSION)/install.sh|g' && \ + find . -name '*.md' | xargs sed -i.bak -E 's|storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/[^/]+/cloud-sql-proxy-operator.yaml|storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v$(VERSION)/cloud-sql-proxy-operator.yaml|g' && \ find . -name '*.md.bak' | xargs rm -f +.PHONY: generate_crd_docs +generate_crd_docs: crd-ref-docs # Generate the + $(CRD_REF_DOCS) --source-path=internal/api/ --config=tools/config-crd-ref-docs.yaml --output-path=bin --renderer=markdown && \ + cp bin/out.md docs/api.md + .PHONY: build_push_docker build_push_docker: # Build docker image with the operator. set IMG env var before running: `IMG=example.com/img:1.0 make build` @test -n "$(IMG)" || ( echo "IMG environment variable must be set to the public repo where you want to push the image" ; exit 1) @@ -163,6 +169,19 @@ go_test: ctrl_manifests envtest # Run tests (but not internal/teste2e) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" \ go test ./internal/.../. -coverprofile cover.out -race +## +# 3rd Party License Checks +.PHONY: license_check +license_check: go-licenses # checks that all deps use allowed licenses + $(GO_LICENSES) check . 
+ + +.PHONY: license_save +license_save: go-licenses # Download all 3rd party license for to include in docker image + ( test -d ThirdPartyLicenses && rm -rf ThirdPartyLicenses ) || true + $(GO_LICENSES) save --save_path ThirdPartyLicenses . + + ## # Kubernetes configuration targets SOURCE_CODE_IMAGE=cloud-sql-proxy-operator:latest @@ -204,6 +223,7 @@ deploy_operator: kustomize kubectl # Deploy controller to the K8s cluster using cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - $(E2E_KUBECTL) rollout status deployment -n cloud-sql-proxy-operator-system cloud-sql-proxy-operator-controller-manager --timeout=90s + $(E2E_PRIVATE_KUBECTL) rollout status deployment -n cloud-sql-proxy-operator-system cloud-sql-proxy-operator-controller-manager --timeout=90s ## # Update installer @@ -229,6 +249,7 @@ installer/install.sh: ## Build install shell script to deploy the operator # This is the file where Terraform will write the kubeconfig.yaml for the # GKE cluster. KUBECONFIG_E2E ?= $(PWD)/bin/e2e-kubeconfig.yaml +PRIVATE_KUBECONFIG_E2E ?= $(PWD)/bin/e2e-private-kubeconfig.yaml # This is the file where Terraform will write the kubeconfig.yaml for the # GKE cluster. 
@@ -236,7 +257,9 @@ ENVIRONMENT_NAME ?= $(shell whoami) # kubectl command with proper environment vars set E2E_KUBECTL_ENV = USE_GKE_E2E_AUTH_PLUGIN=True KUBECONFIG=$(KUBECONFIG_E2E) +E2E_PRIVATE_KUBECTL_ENV = USE_GKE_E2E_AUTH_PLUGIN=True KUBECONFIG=$(PRIVATE_KUBECONFIG_E2E) E2E_KUBECTL = $(E2E_KUBECTL_ENV) $(KUBECTL) +E2E_PRIVATE_KUBECTL = $(E2E_PRIVATE_KUBECTL_ENV) $(KUBECTL) # This is the file where Terraform will write the URL to the e2e container registry E2E_DOCKER_URL_FILE :=$(PWD)/bin/gcloud-docker-repo.url @@ -281,6 +304,7 @@ e2e_cluster_job: e2e_project terraform # Build infrastructure for e2e tests in t PROJECT_DIR=$(PWD) \ E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ + PRIVATE_KUBECONFIG_E2E=$(PRIVATE_KUBECONFIG_E2E) \ E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ ENVIRONMENT_NAME=$(ENVIRONMENT_NAME) \ NODEPOOL_SERVICEACCOUNT_EMAIL=$(NODEPOOL_SERVICEACCOUNT_EMAIL) \ @@ -294,6 +318,7 @@ e2e_cluster: e2e_project terraform # Build infrastructure for e2e tests PROJECT_DIR=$(PWD) \ E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ + PRIVATE_KUBECONFIG_E2E=$(PRIVATE_KUBECONFIG_E2E) \ E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ ENVIRONMENT_NAME=$(ENVIRONMENT_NAME) \ TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ @@ -304,6 +329,7 @@ e2e_cluster_destroy: e2e_project terraform # Destroy the infrastructure for e2e PROJECT_DIR=$(PWD) \ E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ + PRIVATE_KUBECONFIG_E2E=$(PRIVATE_KUBECONFIG_E2E) \ E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ ENVIRONMENT_NAME=$(ENVIRONMENT_NAME) \ TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ @@ -311,32 +337,23 @@ e2e_cluster_destroy: e2e_project terraform # Destroy the infrastructure for e2e .PHONY: e2e_cert_manager_deploy e2e_cert_manager_deploy: e2e_project helm # Deploy the certificate manager - helm repo add jetstack https://charts.jetstack.io --kubeconfig=$(KUBECONFIG_E2E) - helm repo update 
--kubeconfig=$(KUBECONFIG_E2E) - helm get all -n cert-manager cert-manager --kubeconfig=$(KUBECONFIG_E2E) || \ - helm --kubeconfig=$(KUBECONFIG_E2E) install \ - cert-manager jetstack/cert-manager \ - --kubeconfig=$(KUBECONFIG_E2E) \ - --namespace cert-manager \ - --version "$(CERT_MANAGER_VERSION)" \ - --create-namespace \ - --set global.leaderElection.namespace=cert-manager \ - --set installCRDs=true - + KUBECONFIG=$(KUBECONFIG_E2E) CERT_MANAGER_VERSION=$(CERT_MANAGER_VERSION) tools/helm-install-certmanager.sh + KUBECONFIG=$(PRIVATE_KUBECONFIG_E2E) CERT_MANAGER_VERSION=$(CERT_MANAGER_VERSION) tools/helm-install-certmanager.sh .PHONY: e2e_install_crd e2e_install_crd: generate e2e_project kustomize kubectl $(E2E_WORK_DIR) # Install CRDs into the GKE cluster $(KUSTOMIZE) build config/crd > $(E2E_WORK_DIR)/crd.yaml $(E2E_KUBECTL) apply -f $(E2E_WORK_DIR)/crd.yaml - - + $(E2E_PRIVATE_KUBECTL) apply -f $(E2E_WORK_DIR)/crd.yaml .PHONY: e2e_deploy e2e_deploy: e2e_project kustomize kubectl $(E2E_WORK_DIR) # Deploy the operator to the GKE cluster cd config/manager && $(KUSTOMIZE) edit set image controller=$(E2E_OPERATOR_URL) $(KUSTOMIZE) build config/default > $(E2E_WORK_DIR)/operator.yaml $(E2E_KUBECTL) apply -f $(E2E_WORK_DIR)/operator.yaml + $(E2E_PRIVATE_KUBECTL) apply -f $(E2E_WORK_DIR)/operator.yaml $(E2E_KUBECTL) rollout status deployment -n cloud-sql-proxy-operator-system cloud-sql-proxy-operator-controller-manager --timeout=90s + $(E2E_PRIVATE_KUBECTL) rollout status deployment -n cloud-sql-proxy-operator-system cloud-sql-proxy-operator-controller-manager --timeout=90s # Note: `go test --count=1` is used to make sure go actually runs the tests every @@ -354,10 +371,14 @@ e2e_cleanup_test_namespaces: e2e_project kustomize kubectl # remove e2e test nam ( $(E2E_KUBECTL) get ns -o=name | \ grep namespace/test | \ $(E2E_KUBECTL_ENV) xargs $(KUBECTL) delete ) || true + ( $(E2E_PRIVATE_KUBECTL) get ns -o=name | \ + grep namespace/test | \ + $(E2E_PRIVATE_KUBECTL_ENV) 
xargs $(KUBECTL) delete ) || true .PHONY: e2e_undeploy e2e_undeploy: e2e_project kustomize kubectl $(E2E_WORK_DIR) # Remove the operator from the GKE cluster $(E2E_KUBECTL) delete -f $(E2E_WORK_DIR)/operator.yaml + $(E2E_PRIVATE_KUBECTL) delete -f $(E2E_WORK_DIR)/operator.yaml ### # Build the operator docker image and push it to the @@ -408,22 +429,31 @@ KUBECTL ?= $(LOCALBIN)/kubectl ENVTEST ?= $(LOCALBIN)/setup-envtest TERRAFORM ?= $(LOCALBIN)/terraform GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint +GO_LICENSES ?= $(LOCALBIN)/go-licenses +CRD_REF_DOCS ?= $(LOCALBIN)/crd-ref-docs ## Tool Versions -CONTROLLER_TOOLS_VERSION ?= latest -KUBECTL_VERSION ?= $(shell curl -L -s https://dl.k8s.io/release/stable.txt | tr -d '\n') -TERRAFORM_VERSION ?= 1.2.7 -KUSTOMIZE_VERSION ?= v4.5.2 -ENVTEST_VERSION ?= latest -GOLANGCI_LINT_VERSION ?= latest +# Important note: avoid adding spaces in the macro declarations as any +# additional whitespace will break the renovate regex rules. + +KUBECTL_VERSION=v1.26.3# renovate datasource=github-tags depName=kubernetes/kubernetes +TERRAFORM_VERSION=v1.4.2# renovate datasource=github-tags depName=hashicorp/terraform + +CONTROLLER_TOOLS_VERSION=v0.11.3# renovate datasource=go depName=sigs.k8s.io/controller-tools +CRD_REF_DOCS_VERSION=v0.0.8# renovate datasource=go depName=github.com/elastic/crd-ref-docs +ENVTEST_VERSION=v0.0.0-20230301194117-e2d8821b277f# renovate datasource=go depName=sigs.k8s.io/controller-runtime/tools/setup-envtest +GOLANGCI_LINT_VERSION=v1.51.2# renovate datasource=go depName=github.com/golangci/golangci-lint/cmd/golangci-lint +GO_LICENSES_VERSION=v1.6.0# renovate datasource=go depName=github.com/google/go-licenses + +KUSTOMIZE_VERSION=v4.5.2# don't manage with renovate, this repo has non-standard tags GOOS?=$(shell go env GOOS | tr -d '\n') GOARCH?=$(shell go env GOARCH | tr -d '\n') remove_tools: - rm -rf $(LOCALBIN)/* + rm -rf $(KUSTOMIZE) $(CONTROLLER_GEN) $(KUBECTL) $(ENVTEST) $(TERRAFORM) $(GOLANGCI_LINT) 
$(CRD_REF_DOCS) -all_tools: kustomize controller-gen envtest kubectl terraform golangci-lint +all_tools: kustomize controller-gen envtest kubectl terraform golangci-lint crd-ref-docs .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) # Download controller-gen locally if necessary. @@ -441,6 +471,11 @@ envtest: $(ENVTEST) # Download envtest-setup locally if necessary. $(ENVTEST): $(LOCALBIN) test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION) +.PHONY: crd-ref-docs +crd-ref-docs: $(CRD_REF_DOCS) # Download crd-ref-docs locally if necessary. +$(CRD_REF_DOCS): $(LOCALBIN) + test -s $(LOCALBIN)/crd-ref-docs || GOBIN=$(LOCALBIN) go install github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VERSION) + .PHONY: kubectl kubectl: $(KUBECTL) # Download kubectl $(KUBECTL): $(LOCALBIN) @@ -453,7 +488,7 @@ $(KUBECTL): $(LOCALBIN) terraform: $(TERRAFORM) # Download terraform $(TERRAFORM): $(LOCALBIN) test -s $@ || \ - ( curl -L -o $@.zip https://releases.hashicorp.com/terraform/$(TERRAFORM_VERSION)/terraform_$(TERRAFORM_VERSION)_$(GOOS)_$(GOARCH).zip && \ + ( curl -L -o $@.zip https://releases.hashicorp.com/terraform/$(subst v,,$(TERRAFORM_VERSION))/terraform_$(subst v,,$(TERRAFORM_VERSION))_$(GOOS)_$(GOARCH).zip && \ cd $(LOCALBIN) && unzip -o $@.zip && \ rm -f $@.zip && \ chmod a+x $@ && \ @@ -464,6 +499,11 @@ golangci-lint: $(GOLANGCI_LINT) ## Download controller-gen locally if necessary. $(GOLANGCI_LINT): $(LOCALBIN) test -s $@ || GOBIN=$(LOCALBIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) +.PHONY: go-licenses +go-licenses: $(GO_LICENSES) ## Download controller-gen locally if necessary. 
+$(GO_LICENSES): $(LOCALBIN) + test -s $@ || GOBIN=$(LOCALBIN) go install github.com/google/go-licenses@$(GO_LICENSES_VERSION) + ## # Tools that need to be installed on the development machine diff --git a/PROJECT b/PROJECT index 726e5ca6..beb90bbb 100644 --- a/PROJECT +++ b/PROJECT @@ -11,8 +11,8 @@ resources: domain: cloud.google.com group: cloudsql kind: AuthProxyWorkload - path: github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1 - version: v1alpha1 + path: github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1 + version: v1 webhooks: defaulting: true validation: true diff --git a/README.md b/README.md index 01bcd3cb..2ae702f6 100644 --- a/README.md +++ b/README.md @@ -14,9 +14,11 @@ to the matching workload pods. ## Installation -Check for the latest version on the [releases page][releases] and use the +Check for the latest version on the [releases page][latest-release] and use the following instructions. +[latest-release]: https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/releases/latest + Confirm that kubectl can connect to your kubernetes cluster. ```shell @@ -43,7 +45,7 @@ Run the following command to install the cloud sql proxy operator into your kubernetes cluster: ```shell -kubectl apply -f https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v0.2.0/cloud-sql-proxy-operator.yaml +kubectl apply -f https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v0.3.0/cloud-sql-proxy-operator.yaml ``` Confirm that the operator is installed and running by listing its pods: @@ -71,6 +73,7 @@ to know all the details to configure your proxy. 
## Reference Documentation - [Quick Start Guide](docs/quick-start.md) +- [API Documentation](docs/api.md) - [Cloud SQL Proxy](/GoogleCloudPlatform/cloud-sql-proxy) - [Developer Getting Started](docs/dev.md) - [Developing End-to-End tests](docs/e2e-tests.md) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..8b58ae9c --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). + +The Google Security Team will respond within 5 working days of your report on g.co/vulnz. + +We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue. diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index 25ee007d..9340496d 100644 --- a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -27,10 +27,10 @@ spec: singular: authproxyworkload scope: Namespaced versions: - - name: v1alpha1 + - name: v1 schema: openAPIV3Schema: - description: AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied to a matching set of workloads, and shows the status of those proxy containers. This is the Schema for the authproxyworkloads API. + description: AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied to a matching set of workloads, and shows the status of those proxy containers. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' @@ -41,11 +41,26 @@ spec: metadata: type: object spec: - description: AuthProxyWorkloadSpec defines the desired state of AuthProxyWorkload + description: AuthProxyWorkloadSpec describes where and how to configure the proxy. properties: authProxyContainer: - description: AuthProxyContainer describes the resources and config for the Auth Proxy container + description: AuthProxyContainer describes the resources and config for the Auth Proxy container. properties: + adminServer: + description: AdminServer specifies the config for the proxy's admin service which is available to other containers in the same pod. + properties: + enableAPIs: + description: 'EnableAPIs specifies the list of admin APIs to enable. At least one API must be enabled. Possible values: - "Debug" will enable pprof debugging by setting the `--debug` cli flag. - "QuitQuitQuit" will enable pprof debugging by setting the `--quitquitquit` cli flag.' + items: + type: string + minItems: 1 + type: array + port: + description: Port the port for the proxy's localhost-only admin server. This sets the proxy container's CLI argument `--admin-port` + format: int32 + minimum: 1 + type: integer + type: object container: description: Container is debugging parameter that when specified will override the proxy container with a completely custom Container spec. properties: @@ -557,7 +572,7 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: @@ -845,21 +860,23 @@ spec: - name type: object image: - description: Image is the URL to the proxy image. Optional, by default the operator will use the latest known compatible proxy image. + description: "Image is the URL to the proxy image. Optional, by default the operator will use the latest Cloud SQL Auth Proxy version as of the release of the operator. \n The operator ensures that all workloads configured with the default proxy image are upgraded automatically to use to the latest released proxy image. \n When the customer upgrades the operator, the operator upgrades all workloads using the default proxy image to the latest proxy image. The change to the proxy container image is applied in accordance with the RolloutStrategy." type: string maxConnections: description: MaxConnections limits the number of connections. Default value is no limit. This sets the proxy container's CLI argument `--max-connections` format: int64 + minimum: 0 type: integer maxSigtermDelay: description: MaxSigtermDelay is the maximum number of seconds to wait for connections to close after receiving a TERM signal. This sets the proxy container's CLI argument `--max-sigterm-delay` and configures `terminationGracePeriodSeconds` on the workload's PodSpec. format: int64 + minimum: 0 type: integer resources: description: Resources specifies the resources required for the proxy pod. properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. 
\n This field is immutable." + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: @@ -905,35 +922,61 @@ spec: telemetry: description: Telemetry specifies how the proxy should expose telemetry. Optional, by default properties: + disableMetrics: + description: DisableMetrics disables Cloud Monitoring testintegration (used with telemetryProject) This sets the proxy container's CLI argument `--disable-metrics` + type: boolean + disableTraces: + description: DisableTraces disables Cloud Trace testintegration (used with telemetryProject) This sets the proxy container's CLI argument `--disable-traces` + type: boolean httpPort: description: HTTPPort the port for Prometheus and health check server. This sets the proxy container's CLI argument `--http-port` format: int32 type: integer + prometheus: + description: Prometheus Enables Prometheus HTTP endpoint /metrics on localhost This sets the proxy container's CLI argument `--prometheus` + type: boolean + prometheusNamespace: + description: PrometheusNamespace is used the provided Prometheus namespace for metrics This sets the proxy container's CLI argument `--prometheus-namespace` + type: string + quotaProject: + description: QuotaProject Specifies the project to use for Cloud SQL Admin API quota tracking. The IAM principal must have the "serviceusage.services.use" permission for the given project. See https://cloud.google.com/service-usage/docs/overview and https://cloud.google.com/storage/docs/requester-pays This sets the proxy container's CLI argument `--quota-project` + type: string + telemetryPrefix: + description: TelemetryPrefix is the prefix for Cloud Monitoring metrics. 
This sets the proxy container's CLI argument `--telemetry-prefix` + type: string + telemetryProject: + description: TelemetryProject enables Cloud Monitoring and Cloud Trace with the provided project ID. This sets the proxy container's CLI argument `--telemetry-project` + type: string + telemetrySampleRate: + description: TelemetrySampleRate is the Cloud Trace sample rate. A smaller number means more traces. This sets the proxy container's CLI argument `--telemetry-sample-rate` + type: integer type: object type: object instances: - description: Instances lists the Cloud SQL instances to connect + description: Instances describes the Cloud SQL instances to configure on the proxy container. items: - description: "InstanceSpec describes the configuration for how the proxy should expose a Cloud SQL database instance to a workload. The simplest possible configuration declares just the connection string and the port number or unix socket. \n For example, for a TCP port: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"port\":5000 } \n or for a unix socket: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"unixSocketPath\" : \"/mnt/db/my-db-server\" } \n You may allow the operator to choose a non-conflicting TCP port or unix socket instead of explicitly setting the port or socket path. This may be easier to manage when workload needs to connect to many databases. \n For example, for a TCP port: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"portEnvName\":\"MY_DB_SERVER_PORT\" \"hostEnvName\":\"MY_DB_SERVER_HOST\" } \n will set environment variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the value of the TCP port and hostname. Then, the application can read these values to connect to the database through the proxy. 
\n or for a unix socket: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"unixSocketPathEnvName\" : \"MY_DB_SERVER_SOCKET_DIR\" } \n will set environment variables MY_DB_SERVER_SOCKET_DIR with the value of the unix socket path. Then, the application can read this value to connect to the database through the proxy." + description: "InstanceSpec describes the configuration for how the proxy should expose a Cloud SQL database instance to a workload. \n In the minimum recommended configuration, the operator will choose a non-conflicting TCP port and set environment variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the value of the TCP port and hostname. The application can read these values to connect to the database through the proxy. For example: \n `{ \"connectionString\":\"my-project:us-central1:my-db-server\", \"portEnvName\":\"MY_DB_SERVER_PORT\" \"hostEnvName\":\"MY_DB_SERVER_HOST\" }` \n If you want to assign a specific port number for a database, set the `port` field. For example: \n `{ \"connectionString\":\"my-project:us-central1:my-db-server\", \"port\":5000 }`" properties: autoIAMAuthN: - description: AutoIAMAuthN Enables IAM Authentication for this instance. Optional, default false. + description: AutoIAMAuthN (optional) Enables IAM Authentication for this instance. Default value is false. type: boolean connectionString: - description: ConnectionString is the Cloud SQL instance. + description: ConnectionString is the connection string for the Cloud SQL Instance in the format `project_id:region:instance_name` + pattern: ^([^:]+(:[^:]+)?):([^:]+):([^:]+)$ type: string hostEnvName: description: HostEnvName The name of the environment variable containing this instances tcp hostname Optional, when set this environment variable will be added to all containers in the workload. type: string port: - description: Port sets the tcp port for this instance. 
Optional, if not set, a value will be automatically assigned by the operator and set as an environment variable on all containers in the workload named according to PortEnvName. The operator will choose a port so that it does not conflict with other ports on the workload. + description: Port (optional) sets the tcp port for this instance. If not set, a value will be automatically assigned by the operator and set as an environment variable on all containers in the workload named according to PortEnvName. The operator will choose a port so that it does not conflict with other ports on the workload. format: int32 + minimum: 1 type: integer portEnvName: description: PortEnvName is name of the environment variable containing this instance's tcp port. Optional, when set this environment variable will be added to all containers in the workload. type: string privateIP: - description: PrivateIP Enable connection to the Cloud SQL instance's private ip for this instance. Optional, default false. + description: PrivateIP (optional) Enable connection to the Cloud SQL instance's private ip for this instance. Default value is false. type: boolean unixSocketPath: description: UnixSocketPath is the path to the unix socket where the proxy will listen for connnections. This will be mounted to all containers in the pod. @@ -945,7 +988,7 @@ spec: minItems: 1 type: array workloadSelector: - description: Workload selects the workload to + description: Workload selects the workload where the proxy container will be added. properties: kind: description: 'Kind specifies what kind of workload Supported kinds: Deployment, StatefulSet, Pod, ReplicaSet,DaemonSet, Job, CronJob Example: "Deployment" "Deployment.v1" or "Deployment.v1.apps".' @@ -955,7 +998,7 @@ spec: description: Name specifies the name of the resource to select. type: string selector: - description: Selector selects resources using labels. 
See "Label selectors" in the kubernetes docs https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + description: Selector (optional) selects resources using labels. See "Label selectors" in the kubernetes docs https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. diff --git a/config/manager/controller_manager_config.yaml b/config/manager/controller_manager_config.yaml index 56f7bb51..3f0f8bca 100644 --- a/config/manager/controller_manager_config.yaml +++ b/config/manager/controller_manager_config.yaml @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +apiVersion: controller-runtime.sigs.k8s.io/v1 kind: ControllerManagerConfig health: healthProbeBindAddress: :8081 diff --git a/config/samples/cloudsql_v1alpha1_authproxyworkload.yaml b/config/samples/cloudsql_v1alpha1_authproxyworkload.yaml index 30279faf..3661f6da 100644 --- a/config/samples/cloudsql_v1alpha1_authproxyworkload.yaml +++ b/config/samples/cloudsql_v1alpha1_authproxyworkload.yaml @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: cloudsql.cloud.google.com/v1alpha1 +apiVersion: cloudsql.cloud.google.com/v1 kind: AuthProxyWorkload metadata: name: authproxyworkload-sample diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 4e42f8b9..475e0188 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -23,14 +23,14 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload + path: /mutate-cloudsql-cloud-google-com-v1-authproxyworkload failurePolicy: Fail name: mauthproxyworkload.kb.io rules: - apiGroups: - cloudsql.cloud.google.com apiVersions: - - v1alpha1 + - v1 operations: - CREATE - UPDATE @@ -50,14 +50,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload + path: /validate-cloudsql-cloud-google-com-v1-authproxyworkload failurePolicy: Fail name: vauthproxyworkload.kb.io rules: - apiGroups: - cloudsql.cloud.google.com apiVersions: - - v1alpha1 + - v1 operations: - CREATE - UPDATE diff --git a/docs/api.md b/docs/api.md new file mode 100644 index 00000000..1acb5c8b --- /dev/null +++ b/docs/api.md @@ -0,0 +1,153 @@ +# API Reference + +## Packages +- [cloudsql.cloud.google.com/v1](#cloudsqlcloudgooglecomv1) + + +## cloudsql.cloud.google.com/v1 + +Package v1 contains the API Schema definitions for the +the custom resource AuthProxyWorkload version v1. + + +### Resource Types +- [AuthProxyWorkload](#authproxyworkload) + + + +#### AdminServerSpec + + + +AdminServerSpec specifies how to start the proxy's admin server: which port and whether to enable debugging or quitquitquit. It controls to the proxy's --admin-port, --debug, and --quitquitquit CLI flags. + +_Appears in:_ +- [AuthProxyContainerSpec](#authproxycontainerspec) + +| Field | Description | +| --- | --- | +| `port` _integer_ | Port the port for the proxy's localhost-only admin server. 
This sets the proxy container's CLI argument `--admin-port` | +| `enableAPIs` _string array_ | EnableAPIs specifies the list of admin APIs to enable. At least one API must be enabled. Possible values: - "Debug" will enable pprof debugging by setting the `--debug` cli flag. - "QuitQuitQuit" will enable the proxy's quitquitquit endpoint by setting the `--quitquitquit` cli flag. | + + +#### AuthProxyContainerSpec + + + +AuthProxyContainerSpec describes how to configure global proxy configuration and kubernetes-specific container configuration. + +_Appears in:_ +- [AuthProxyWorkloadSpec](#authproxyworkloadspec) + +| Field | Description | +| --- | --- | +| `container` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_ | Container is a debugging parameter that when specified will override the proxy container with a completely custom Container spec. | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcerequirements-v1-core)_ | Resources specifies the resources required for the proxy pod. | +| `telemetry` _[TelemetrySpec](#telemetryspec)_ | Telemetry specifies how the proxy should expose telemetry. Optional. | +| `adminServer` _[AdminServerSpec](#adminserverspec)_ | AdminServer specifies the config for the proxy's admin service which is available to other containers in the same pod. | +| `maxConnections` _integer_ | MaxConnections limits the number of connections. Default value is no limit. This sets the proxy container's CLI argument `--max-connections` | +| `maxSigtermDelay` _integer_ | MaxSigtermDelay is the maximum number of seconds to wait for connections to close after receiving a TERM signal. This sets the proxy container's CLI argument `--max-sigterm-delay` and configures `terminationGracePeriodSeconds` on the workload's PodSpec. 
| +| `sqlAdminAPIEndpoint` _string_ | SQLAdminAPIEndpoint is a debugging parameter that when specified will change the Google Cloud api endpoint used by the proxy. | +| `image` _string_ | Image is the URL to the proxy image. Optional, by default the operator will use the latest Cloud SQL Auth Proxy version as of the release of the operator. + The operator ensures that all workloads configured with the default proxy image are upgraded automatically to use the latest released proxy image. + When the customer upgrades the operator, the operator upgrades all workloads using the default proxy image to the latest proxy image. The change to the proxy container image is applied in accordance with the RolloutStrategy. | +| `rolloutStrategy` _string_ | RolloutStrategy indicates the strategy to use when rolling out changes to the workloads affected by the results. When this is set to `Workload`, changes to this resource will be automatically applied to a running Deployment, StatefulSet, DaemonSet, or ReplicaSet in accordance with the Strategy set on that workload. When this is set to `None`, the operator will take no action to roll out changes to affected workloads. `Workload` will be used by default if no value is set. See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy | + + +#### AuthProxyWorkload + + + +AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied to a matching set of workloads, and shows the status of those proxy containers. + + + +| Field | Description | +| --- | --- | +| `apiVersion` _string_ | `cloudsql.cloud.google.com/v1` +| `kind` _string_ | `AuthProxyWorkload` +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. 
| +| `spec` _[AuthProxyWorkloadSpec](#authproxyworkloadspec)_ | | + + +#### AuthProxyWorkloadSpec + + + +AuthProxyWorkloadSpec describes where and how to configure the proxy. + +_Appears in:_ +- [AuthProxyWorkload](#authproxyworkload) + +| Field | Description | +| --- | --- | +| `workloadSelector` _[WorkloadSelectorSpec](#workloadselectorspec)_ | Workload selects the workload where the proxy container will be added. | +| `instances` _[InstanceSpec](#instancespec) array_ | Instances describes the Cloud SQL instances to configure on the proxy container. | +| `authProxyContainer` _[AuthProxyContainerSpec](#authproxycontainerspec)_ | AuthProxyContainer describes the resources and config for the Auth Proxy container. | + + +#### InstanceSpec + + + +InstanceSpec describes the configuration for how the proxy should expose a Cloud SQL database instance to a workload. + In the minimum recommended configuration, the operator will choose a non-conflicting TCP port and set environment variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the value of the TCP port and hostname. The application can read these values to connect to the database through the proxy. For example: + `{ "connectionString":"my-project:us-central1:my-db-server", "portEnvName":"MY_DB_SERVER_PORT" "hostEnvName":"MY_DB_SERVER_HOST" }` + If you want to assign a specific port number for a database, set the `port` field. For example: + `{ "connectionString":"my-project:us-central1:my-db-server", "port":5000 }` + +_Appears in:_ +- [AuthProxyWorkloadSpec](#authproxyworkloadspec) + +| Field | Description | +| --- | --- | +| `connectionString` _string_ | ConnectionString is the connection string for the Cloud SQL Instance in the format `project_id:region:instance_name` | +| `port` _integer_ | Port (optional) sets the tcp port for this instance. If not set, a value will be automatically assigned by the operator and set as an environment variable on all containers in the workload named according to PortEnvName. 
The operator will choose a port so that it does not conflict with other ports on the workload. | +| `autoIAMAuthN` _boolean_ | AutoIAMAuthN (optional) Enables IAM Authentication for this instance. Default value is false. | +| `privateIP` _boolean_ | PrivateIP (optional) Enable connection to the Cloud SQL instance's private ip for this instance. Default value is false. | +| `portEnvName` _string_ | PortEnvName is the name of the environment variable containing this instance's tcp port. Optional, when set this environment variable will be added to all containers in the workload. | +| `hostEnvName` _string_ | HostEnvName The name of the environment variable containing this instance's tcp hostname. Optional, when set this environment variable will be added to all containers in the workload. | +| `unixSocketPath` _string_ | UnixSocketPath is the path to the unix socket where the proxy will listen for connections. This will be mounted to all containers in the pod. | +| `unixSocketPathEnvName` _string_ | UnixSocketPathEnvName is the environment variable containing the value of UnixSocketPath. | + + +#### TelemetrySpec + + + +TelemetrySpec specifies how the proxy container will expose telemetry. + +_Appears in:_ +- [AuthProxyContainerSpec](#authproxycontainerspec) + +| Field | Description | +| --- | --- | +| `quotaProject` _string_ | QuotaProject Specifies the project to use for Cloud SQL Admin API quota tracking. The IAM principal must have the "serviceusage.services.use" permission for the given project. 
See https://cloud.google.com/service-usage/docs/overview and https://cloud.google.com/storage/docs/requester-pays This sets the proxy container's CLI argument `--quota-project` | +| `prometheus` _boolean_ | Prometheus Enables Prometheus HTTP endpoint /metrics on localhost This sets the proxy container's CLI argument `--prometheus` | +| `prometheusNamespace` _string_ | PrometheusNamespace uses the provided Prometheus namespace for metrics This sets the proxy container's CLI argument `--prometheus-namespace` | +| `telemetryProject` _string_ | TelemetryProject enables Cloud Monitoring and Cloud Trace with the provided project ID. This sets the proxy container's CLI argument `--telemetry-project` | +| `telemetryPrefix` _string_ | TelemetryPrefix is the prefix for Cloud Monitoring metrics. This sets the proxy container's CLI argument `--telemetry-prefix` | +| `telemetrySampleRate` _integer_ | TelemetrySampleRate is the Cloud Trace sample rate. A smaller number means more traces. This sets the proxy container's CLI argument `--telemetry-sample-rate` | +| `httpPort` _integer_ | HTTPPort the port for Prometheus and health check server. This sets the proxy container's CLI argument `--http-port` | +| `disableTraces` _boolean_ | DisableTraces disables Cloud Trace integration (used with telemetryProject) This sets the proxy container's CLI argument `--disable-traces` | +| `disableMetrics` _boolean_ | DisableMetrics disables Cloud Monitoring integration (used with telemetryProject) This sets the proxy container's CLI argument `--disable-metrics` | + + +#### WorkloadSelectorSpec + + + +WorkloadSelectorSpec describes which workloads should be configured with this proxy configuration. To be valid, WorkloadSelectorSpec must specify `kind` and either `name` or `selector`. 
+ +_Appears in:_ +- [AuthProxyWorkloadSpec](#authproxyworkloadspec) + +| Field | Description | +| --- | --- | +| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_ | Selector (optional) selects resources using labels. See "Label selectors" in the kubernetes docs https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors | +| `kind` _string_ | Kind specifies what kind of workload Supported kinds: Deployment, StatefulSet, Pod, ReplicaSet,DaemonSet, Job, CronJob Example: "Deployment" "Deployment.v1" or "Deployment.v1.apps". | +| `name` _string_ | Name specifies the name of the resource to select. | + + + + diff --git a/docs/dev.md b/docs/dev.md index 475b4436..89f282b3 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -26,8 +26,8 @@ mv .bin/tmp/* . Then, to create the CRD for Workload ``` -.bin/kubebuilder create api --group cloudsql --version v1alpha1 --kind AuthProxyWorkload --controller --resource --force -.bin/kubebuilder create webhook --group cloudsql --version v1alpha1 --kind AuthProxyWorkload --defaulting --programmatic-validation +.bin/kubebuilder create api --group cloudsql --version v1 --kind AuthProxyWorkload --controller --resource --force +.bin/kubebuilder create webhook --group cloudsql --version v1 --kind AuthProxyWorkload --defaulting --programmatic-validation ``` diff --git a/docs/examples/deployment-mssql-tcp.yaml b/docs/examples/deployment-mssql-tcp.yaml index 639ce1dd..edb99306 100644 --- a/docs/examples/deployment-mssql-tcp.yaml +++ b/docs/examples/deployment-mssql-tcp.yaml @@ -20,7 +20,7 @@ # Create an AuthProxyWorkload to hold the configuration for your # Cloud SQL Proxy containers. 
-apiVersion: cloudsql.cloud.google.com/v1alpha1 +apiVersion: cloudsql.cloud.google.com/v1 kind: AuthProxyWorkload metadata: name: authproxyworkload-sample diff --git a/docs/examples/deployment-mysql-tcp.yaml b/docs/examples/deployment-mysql-tcp.yaml index 7c05c6d1..69831f2a 100644 --- a/docs/examples/deployment-mysql-tcp.yaml +++ b/docs/examples/deployment-mysql-tcp.yaml @@ -20,7 +20,7 @@ # Create an AuthProxyWorkload to hold the configuration for your # Cloud SQL Proxy containers. -apiVersion: cloudsql.cloud.google.com/v1alpha1 +apiVersion: cloudsql.cloud.google.com/v1 kind: AuthProxyWorkload metadata: name: authproxyworkload-sample diff --git a/docs/examples/deployment-postgres-tcp.yaml b/docs/examples/deployment-postgres-tcp.yaml index 7e3ab159..60e18fe4 100644 --- a/docs/examples/deployment-postgres-tcp.yaml +++ b/docs/examples/deployment-postgres-tcp.yaml @@ -20,7 +20,7 @@ # Create an AuthProxyWorkload to hold the configuration for your # Cloud SQL Proxy containers. -apiVersion: cloudsql.cloud.google.com/v1alpha1 +apiVersion: cloudsql.cloud.google.com/v1 kind: AuthProxyWorkload metadata: name: authproxyworkload-sample diff --git a/docs/quick-start.md b/docs/quick-start.md index 6a7d0970..dfe28539 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -49,7 +49,7 @@ gcloud sql instances describe quickstart-instance --format='value(connectionName Create a new file named `authproxyworkload.yaml` containing the following: ```yaml -apiVersion: cloudsql.cloud.google.com/v1alpha1 +apiVersion: cloudsql.cloud.google.com/v1 kind: AuthProxyWorkload metadata: name: authproxyworkload-sample diff --git a/go.mod b/go.mod index 8412d381..59c069f7 100644 --- a/go.mod +++ b/go.mod @@ -5,9 +5,9 @@ go 1.20 require ( github.com/go-logr/logr v1.2.3 go.uber.org/zap v1.24.0 - k8s.io/api v0.26.1 - k8s.io/apimachinery v0.26.1 - k8s.io/client-go v0.26.1 + k8s.io/api v0.26.3 + k8s.io/apimachinery v0.26.3 + k8s.io/client-go v0.26.3 sigs.k8s.io/controller-runtime v0.14.4 
sigs.k8s.io/yaml v1.3.0 ) @@ -47,11 +47,11 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect + golang.org/x/net v0.7.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/term v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect @@ -59,8 +59,8 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.26.1 // indirect - k8s.io/component-base v0.26.1 // indirect + k8s.io/apiextensions-apiserver v0.26.3 // indirect + k8s.io/component-base v0.26.3 // indirect k8s.io/klog/v2 v2.80.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect diff --git a/go.sum b/go.sum index 830526d2..56262714 100644 --- a/go.sum +++ b/go.sum @@ -347,8 +347,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= 
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -407,12 +407,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -420,8 +420,8 @@ 
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -585,16 +585,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= -k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/component-base v0.26.1 
h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= -k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= +k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= +k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= +k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= +k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= +k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= +k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= +k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= +k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= +k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= diff --git a/infra/permissions/main.tf b/infra/permissions/main.tf index a88e2d67..b250c5e5 100644 --- a/infra/permissions/main.tf +++ b/infra/permissions/main.tf @@ -18,13 +18,15 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "4.48.0" + version = "4.58.0" } } } provider "google" { + user_project_override = true + billing_project = var.project_id } # Enable gcloud project APIs @@ -43,6 +45,7 @@ locals { "replicapoolupdater.googleapis.com", "resourceviews.googleapis.com", "servicemanagement.googleapis.com", + "servicenetworking.googleapis.com", "sql-component.googleapis.com", "sqladmin.googleapis.com", "storage-api.googleapis.com"]) diff --git a/infra/resources/database.tf b/infra/resources/database.tf index 2180c4f3..d9f2cd9f 100644 --- 
a/infra/resources/database.tf +++ b/infra/resources/database.tf @@ -24,7 +24,7 @@ resource "random_id" "db_password" { # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { - name = "inst${random_id.db_name.hex}" + name = "inst${random_id.db_name.hex}${var.environment_name}" project = var.project_id region = var.gcloud_region database_version = "POSTGRES_13" @@ -44,7 +44,7 @@ resource "google_sql_database" "db" { # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "mysql" { - name = "mysql${random_id.db_name.hex}" + name = "mysql${random_id.db_name.hex}${var.environment_name}" project = var.project_id region = var.gcloud_region database_version = "MYSQL_8_0" @@ -72,7 +72,7 @@ resource "google_sql_user" "mysql_user" { # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "mssql" { - name = "mssql${random_id.db_name.hex}" + name = "mssql${random_id.db_name.hex}${var.environment_name}" project = var.project_id region = var.gcloud_region database_version = "SQLSERVER_2019_EXPRESS" diff --git a/infra/resources/main.tf b/infra/resources/main.tf index d0982b13..1a642b04 100644 --- a/infra/resources/main.tf +++ b/infra/resources/main.tf @@ -18,13 +18,26 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "4.48.0" + version = "4.58.0" + } + google-beta = { + source = "hashicorp/google-beta" + version = "4.58.0" } } } provider "google" { + user_project_override = true + billing_project = var.project_id +} + +provider "google-beta" { + region = "us-central1" + zone = "us-central1-a" + user_project_override = true + billing_project = var.project_id } # Enable gcloud 
project APIs @@ -63,6 +76,15 @@ locals { } kubeconfig = var.kubeconfig_path } + private = { + postgres = { + instance = google_sql_database_instance.private_postgres.connection_name + dbName = google_sql_database.private_db.name + rootUser = "postgres" + rootPassword = random_id.db_password.hex + } + kubeconfig = var.private_kubeconfig_path + } } } diff --git a/infra/resources/private-database.tf b/infra/resources/private-database.tf new file mode 100644 index 00000000..8bc0e4ce --- /dev/null +++ b/infra/resources/private-database.tf @@ -0,0 +1,61 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +resource "random_id" "private_db_name_suffix" { + byte_length = 4 +} + + +# See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version +resource "google_sql_database_instance" "private_postgres" { + provider = google-beta + + name = "privateinst${random_id.private_db_name_suffix.hex}${var.environment_name}" + project = var.project_id + region = var.gcloud_region + database_version = "POSTGRES_13" + depends_on = [google_service_networking_connection.private_vpc_connection] + + settings { + tier = "db-f1-micro" + user_labels = local.standard_labels + ip_configuration { + ipv4_enabled = false + private_network = google_compute_network.private_k8s_network.id + enable_private_path_for_google_cloud_services = true + } + } + deletion_protection = "true" + root_password = random_id.db_password.hex +} + +resource "google_sql_database" "private_db" { + name = "db" + instance = google_sql_database_instance.private_postgres.name + project = var.project_id +} + + +output "private_db_root_password" { + value = random_id.db_password.hex +} +output "private_db_instance_name" { + value = google_sql_database_instance.private_postgres.name +} +output "private_db_database_name" { + value = google_sql_database.private_db.name +} diff --git a/infra/resources/private-network.tf b/infra/resources/private-network.tf new file mode 100644 index 00000000..9bed5e2a --- /dev/null +++ b/infra/resources/private-network.tf @@ -0,0 +1,66 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +resource "google_compute_network" "private_k8s_network" { + provider = google-beta + project = var.project_id + + name = "test-vpc-${var.environment_name}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "private_k8s_network" { + provider = google-beta + project = var.project_id + region = var.gcloud_region + + name = "test-vpc-subnetwork-${var.environment_name}" + ip_cidr_range = "10.2.0.0/16" + network = google_compute_network.private_k8s_network.id + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range = "192.168.64.0/22" + } + + secondary_ip_range { + range_name = "nodepool-pod-ranges" + ip_cidr_range = "192.168.128.0/22" + } +} + +resource "google_compute_global_address" "private_ip_address" { + provider = google-beta + project = var.project_id + + name = "test-vpc-private-ip-address-${var.environment_name}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.private_k8s_network.id +} + +resource "google_service_networking_connection" "private_vpc_connection" { + provider = google-beta + + network = google_compute_network.private_k8s_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] +} diff --git a/infra/resources/private_gke_cluster.tf b/infra/resources/private_gke_cluster.tf new file mode 100644 index 00000000..ff1c8617 --- /dev/null +++ b/infra/resources/private_gke_cluster.tf @@ -0,0 +1,137 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# From https://github.com/hashicorp/terraform-provider-kubernetes/blob/main/kubernetes/test-infra/gke/main.tf + +resource "google_container_cluster" "private" { + project = var.project_id + name = "operator-private-${var.environment_name}" + location = var.gcloud_zone + min_master_version = data.google_container_engine_versions.supported.latest_master_version + initial_node_count = 2 + + // Alpha features are disabled by default and can be enabled by GKE for a particular GKE control plane version. + // Creating an alpha cluster enables all alpha features by default. + // Ref: https://cloud.google.com/kubernetes-engine/docs/concepts/feature-gates + enable_kubernetes_alpha = var.enable_alpha + + // disalbe the default nodepool and specify node pools as + // separate terraform resources. 
This way if we + // change the nodepool config, we don't delete the cluster too + remove_default_node_pool = true + resource_labels = local.standard_labels + + network = google_compute_network.private_k8s_network.id + subnetwork = google_compute_subnetwork.private_k8s_network.id + + ip_allocation_policy { + services_secondary_range_name = google_compute_subnetwork.private_k8s_network.secondary_ip_range.0.range_name + cluster_secondary_range_name = google_compute_subnetwork.private_k8s_network.secondary_ip_range.1.range_name + } +} + +resource "google_container_node_pool" "private_preemptible_nodes" { + name = "operator-private-nodes-${var.environment_name}" + cluster = google_container_cluster.private.id + initial_node_count = var.workers_count + version = data.google_container_engine_versions.supported.latest_node_version + location = var.gcloud_zone + + autoscaling { + max_node_count = 10 + min_node_count = 2 + } + + management { + auto_repair = var.enable_alpha ? false : true + auto_upgrade = var.enable_alpha ? false : true + } + + network_config { + enable_private_nodes = false + pod_range = google_compute_subnetwork.private_k8s_network.secondary_ip_range.2.range_name + create_pod_range = false + } + + node_config { + preemptible = true + machine_type = "e2-standard-8" + resource_labels = local.standard_labels + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = var.nodepool_serviceaccount_email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/sqlservice.admin", + ] + } +} + + +locals { + # This is the recommended way to produce a kubeconfig file from + # the Google Cloud GKE terraform resource. 
+ private_kubeconfig = { + apiVersion = "v1" + kind = "Config" + preferences = { + colors = true + } + current-context = google_container_cluster.private.name + contexts = [ + { + name = google_container_cluster.private.name + context = { + cluster = google_container_cluster.private.name + user = var.nodepool_serviceaccount_email + namespace = "default" + } + } + ] + clusters = [ + { + name = google_container_cluster.private.name + cluster = { + server = "https://${google_container_cluster.private.endpoint}" + certificate-authority-data = google_container_cluster.private.master_auth[0].cluster_ca_certificate + } + } + ] + users = [ + { + name = var.nodepool_serviceaccount_email + user = { + exec = { + apiVersion = "client.authentication.k8s.io/v1beta1" + command = "gke-gcloud-auth-plugin" + installHint = "Install gke-gcloud-auth-plugin for use with kubectl by following https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke" + provideClusterInfo = true + } + } + } + ] + } + +} + +resource "local_file" "private_kubeconfig" { + content = yamlencode(local.private_kubeconfig) + filename = var.private_kubeconfig_path +} diff --git a/infra/resources/vars.tf b/infra/resources/vars.tf index f1a4c039..9624ef08 100644 --- a/infra/resources/vars.tf +++ b/infra/resources/vars.tf @@ -28,6 +28,10 @@ variable "kubeconfig_path" { type = string description = "The path to save the kubeconfig file" } +variable "private_kubeconfig_path" { + type = string + description = "The path to save the kubeconfig file" +} variable "output_json_path" { type = string description = "The path to save test-infra.json file, input for e2e tests" diff --git a/infra/run.sh b/infra/run.sh index a12767c5..8c1a8ae2 100755 --- a/infra/run.sh +++ b/infra/run.sh @@ -44,6 +44,8 @@ # E2E_PROJECT_ID - The Google Cloud project ID to act upon. # KUBECONFIG_E2E - The output filename for the kubeconfig json file # for the kubernetes cluster for the e2e environment. 
+# PRIVATE_KUBECONFIG_E2E - The output filename for the kubeconfig json file +# for the private ip kubernetes cluster for the e2e environment. # E2E_DOCKER_URL_FILE - The output filename for a text file containing the # URL to the docker container registry for the e2e test environment. # @@ -84,6 +86,7 @@ function apply() { -var "gcloud_docker_url_file=$E2E_DOCKER_URL_FILE" \ -var "project_id=$E2E_PROJECT_ID" \ -var "kubeconfig_path=$KUBECONFIG_E2E" \ + -var "private_kubeconfig_path=$PRIVATE_KUBECONFIG_E2E" \ -var "environment_name=$ENVIRONMENT_NAME" \ -var "nodepool_serviceaccount_email=$nodepool_serviceaccount_email" @@ -97,6 +100,7 @@ function destroy() { -var "gcloud_docker_url_file=$E2E_DOCKER_URL_FILE" \ -var "project_id=$E2E_PROJECT_ID" \ -var "kubeconfig_path=$KUBECONFIG_E2E" \ + -var "private_kubeconfig_path=$PRIVATE_KUBECONFIG_E2E" \ -var "environment_name=$ENVIRONMENT_NAME" \ -var "nodepool_serviceaccount_email=$nodepool_serviceaccount_email" } @@ -137,6 +141,7 @@ EOF -var "gcloud_docker_url_file=$E2E_DOCKER_URL_FILE" \ -var "project_id=$E2E_PROJECT_ID" \ -var "kubeconfig_path=$KUBECONFIG_E2E" \ + -var "private_kubeconfig_path=$PRIVATE_KUBECONFIG_E2E" \ -var "environment_name=$ENVIRONMENT_NAME" \ -var "nodepool_serviceaccount_email=$NODEPOOL_SERVICEACCOUNT_EMAIL" @@ -178,6 +183,12 @@ if [[ -z "$KUBECONFIG_E2E" ]]; then FAIL=1 fi +#expects PRIVATE_KUBECONFIG_E2E +if [[ -z "$PRIVATE_KUBECONFIG_E2E" ]]; then + echo "expects PRIVATE_KUBECONFIG_E2E to be set the location where kubeconfig should be written." + FAIL=1 +fi + #expects $E2E_DOCKER_URL_FILE if [[ -z "$E2E_DOCKER_URL_FILE" ]]; then echo "expects E2E_DOCKER_URL_FILE to be set the location where docker url should be written." 
diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 487d5402..0124c7a0 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -45,10 +45,10 @@ spec: singular: authproxyworkload scope: Namespaced versions: - - name: v1alpha1 + - name: v1 schema: openAPIV3Schema: - description: AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied to a matching set of workloads, and shows the status of those proxy containers. This is the Schema for the authproxyworkloads API. + description: AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied to a matching set of workloads, and shows the status of those proxy containers. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' @@ -59,11 +59,26 @@ spec: metadata: type: object spec: - description: AuthProxyWorkloadSpec defines the desired state of AuthProxyWorkload + description: AuthProxyWorkloadSpec describes where and how to configure the proxy. properties: authProxyContainer: - description: AuthProxyContainer describes the resources and config for the Auth Proxy container + description: AuthProxyContainer describes the resources and config for the Auth Proxy container. properties: + adminServer: + description: AdminServer specifies the config for the proxy's admin service which is available to other containers in the same pod. + properties: + enableAPIs: + description: 'EnableAPIs specifies the list of admin APIs to enable. At least one API must be enabled. Possible values: - "Debug" will enable pprof debugging by setting the `--debug` cli flag. 
- "QuitQuitQuit" will enable pprof debugging by setting the `--quitquitquit` cli flag.' + items: + type: string + minItems: 1 + type: array + port: + description: Port the port for the proxy's localhost-only admin server. This sets the proxy container's CLI argument `--admin-port` + format: int32 + minimum: 1 + type: integer + type: object container: description: Container is debugging parameter that when specified will override the proxy container with a completely custom Container spec. properties: @@ -575,7 +590,7 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: @@ -863,21 +878,23 @@ spec: - name type: object image: - description: Image is the URL to the proxy image. Optional, by default the operator will use the latest known compatible proxy image. + description: "Image is the URL to the proxy image. Optional, by default the operator will use the latest Cloud SQL Auth Proxy version as of the release of the operator. \n The operator ensures that all workloads configured with the default proxy image are upgraded automatically to use to the latest released proxy image. \n When the customer upgrades the operator, the operator upgrades all workloads using the default proxy image to the latest proxy image. 
The change to the proxy container image is applied in accordance with the RolloutStrategy." type: string maxConnections: description: MaxConnections limits the number of connections. Default value is no limit. This sets the proxy container's CLI argument `--max-connections` format: int64 + minimum: 0 type: integer maxSigtermDelay: description: MaxSigtermDelay is the maximum number of seconds to wait for connections to close after receiving a TERM signal. This sets the proxy container's CLI argument `--max-sigterm-delay` and configures `terminationGracePeriodSeconds` on the workload's PodSpec. format: int64 + minimum: 0 type: integer resources: description: Resources specifies the resources required for the proxy pod. properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: @@ -923,35 +940,61 @@ spec: telemetry: description: Telemetry specifies how the proxy should expose telemetry. Optional, by default properties: + disableMetrics: + description: DisableMetrics disables Cloud Monitoring testintegration (used with telemetryProject) This sets the proxy container's CLI argument `--disable-metrics` + type: boolean + disableTraces: + description: DisableTraces disables Cloud Trace testintegration (used with telemetryProject) This sets the proxy container's CLI argument `--disable-traces` + type: boolean httpPort: description: HTTPPort the port for Prometheus and health check server. 
This sets the proxy container's CLI argument `--http-port` format: int32 type: integer + prometheus: + description: Prometheus Enables Prometheus HTTP endpoint /metrics on localhost This sets the proxy container's CLI argument `--prometheus` + type: boolean + prometheusNamespace: + description: PrometheusNamespace is used the provided Prometheus namespace for metrics This sets the proxy container's CLI argument `--prometheus-namespace` + type: string + quotaProject: + description: QuotaProject Specifies the project to use for Cloud SQL Admin API quota tracking. The IAM principal must have the "serviceusage.services.use" permission for the given project. See https://cloud.google.com/service-usage/docs/overview and https://cloud.google.com/storage/docs/requester-pays This sets the proxy container's CLI argument `--quota-project` + type: string + telemetryPrefix: + description: TelemetryPrefix is the prefix for Cloud Monitoring metrics. This sets the proxy container's CLI argument `--telemetry-prefix` + type: string + telemetryProject: + description: TelemetryProject enables Cloud Monitoring and Cloud Trace with the provided project ID. This sets the proxy container's CLI argument `--telemetry-project` + type: string + telemetrySampleRate: + description: TelemetrySampleRate is the Cloud Trace sample rate. A smaller number means more traces. This sets the proxy container's CLI argument `--telemetry-sample-rate` + type: integer type: object type: object instances: - description: Instances lists the Cloud SQL instances to connect + description: Instances describes the Cloud SQL instances to configure on the proxy container. items: - description: "InstanceSpec describes the configuration for how the proxy should expose a Cloud SQL database instance to a workload. The simplest possible configuration declares just the connection string and the port number or unix socket. 
\n For example, for a TCP port: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"port\":5000 } \n or for a unix socket: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"unixSocketPath\" : \"/mnt/db/my-db-server\" } \n You may allow the operator to choose a non-conflicting TCP port or unix socket instead of explicitly setting the port or socket path. This may be easier to manage when workload needs to connect to many databases. \n For example, for a TCP port: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"portEnvName\":\"MY_DB_SERVER_PORT\" \"hostEnvName\":\"MY_DB_SERVER_HOST\" } \n will set environment variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the value of the TCP port and hostname. Then, the application can read these values to connect to the database through the proxy. \n or for a unix socket: \n { \"connectionString\":\"my-project:us-central1:my-db-server\", \"unixSocketPathEnvName\" : \"MY_DB_SERVER_SOCKET_DIR\" } \n will set environment variables MY_DB_SERVER_SOCKET_DIR with the value of the unix socket path. Then, the application can read this value to connect to the database through the proxy." + description: "InstanceSpec describes the configuration for how the proxy should expose a Cloud SQL database instance to a workload. \n In the minimum recommended configuration, the operator will choose a non-conflicting TCP port and set environment variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the value of the TCP port and hostname. The application can read these values to connect to the database through the proxy. For example: \n `{ \"connectionString\":\"my-project:us-central1:my-db-server\", \"portEnvName\":\"MY_DB_SERVER_PORT\" \"hostEnvName\":\"MY_DB_SERVER_HOST\" }` \n If you want to assign a specific port number for a database, set the `port` field. 
For example: \n `{ \"connectionString\":\"my-project:us-central1:my-db-server\", \"port\":5000 }`" properties: autoIAMAuthN: - description: AutoIAMAuthN Enables IAM Authentication for this instance. Optional, default false. + description: AutoIAMAuthN (optional) Enables IAM Authentication for this instance. Default value is false. type: boolean connectionString: - description: ConnectionString is the Cloud SQL instance. + description: ConnectionString is the connection string for the Cloud SQL Instance in the format `project_id:region:instance_name` + pattern: ^([^:]+(:[^:]+)?):([^:]+):([^:]+)$ type: string hostEnvName: description: HostEnvName The name of the environment variable containing this instances tcp hostname Optional, when set this environment variable will be added to all containers in the workload. type: string port: - description: Port sets the tcp port for this instance. Optional, if not set, a value will be automatically assigned by the operator and set as an environment variable on all containers in the workload named according to PortEnvName. The operator will choose a port so that it does not conflict with other ports on the workload. + description: Port (optional) sets the tcp port for this instance. If not set, a value will be automatically assigned by the operator and set as an environment variable on all containers in the workload named according to PortEnvName. The operator will choose a port so that it does not conflict with other ports on the workload. format: int32 + minimum: 1 type: integer portEnvName: description: PortEnvName is name of the environment variable containing this instance's tcp port. Optional, when set this environment variable will be added to all containers in the workload. type: string privateIP: - description: PrivateIP Enable connection to the Cloud SQL instance's private ip for this instance. Optional, default false. 
+ description: PrivateIP (optional) Enable connection to the Cloud SQL instance's private ip for this instance. Default value is false. type: boolean unixSocketPath: description: UnixSocketPath is the path to the unix socket where the proxy will listen for connnections. This will be mounted to all containers in the pod. @@ -963,7 +1006,7 @@ spec: minItems: 1 type: array workloadSelector: - description: Workload selects the workload to + description: Workload selects the workload where the proxy container will be added. properties: kind: description: 'Kind specifies what kind of workload Supported kinds: Deployment, StatefulSet, Pod, ReplicaSet,DaemonSet, Job, CronJob Example: "Deployment" "Deployment.v1" or "Deployment.v1.apps".' @@ -973,7 +1016,7 @@ spec: description: Name specifies the name of the resource to select. type: string selector: - description: Selector selects resources using labels. See "Label selectors" in the kubernetes docs https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + description: Selector (optional) selects resources using labels. See "Label selectors" in the kubernetes docs https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -1341,7 +1384,7 @@ data: # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + apiVersion: controller-runtime.sigs.k8s.io/v1 kind: ControllerManagerConfig health: healthProbeBindAddress: :8081 @@ -1422,7 +1465,7 @@ spec: - --leader-elect command: - /manager - image: gcr.io/cloud-sql-connectors/cloud-sql-operator/cloud-sql-proxy-operator:0.3.0 + image: gcr.io/cloud-sql-connectors/cloud-sql-operator/cloud-sql-proxy-operator:0.4.0 livenessProbe: httpGet: path: /healthz @@ -1554,14 +1597,14 @@ webhooks: service: name: cloud-sql-proxy-operator-webhook-service namespace: cloud-sql-proxy-operator-system - path: /mutate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload + path: /mutate-cloudsql-cloud-google-com-v1-authproxyworkload failurePolicy: Fail name: mauthproxyworkload.kb.io rules: - apiGroups: - cloudsql.cloud.google.com apiVersions: - - v1alpha1 + - v1 operations: - CREATE - UPDATE @@ -1582,14 +1625,14 @@ webhooks: service: name: cloud-sql-proxy-operator-webhook-service namespace: cloud-sql-proxy-operator-system - path: /validate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload + path: /validate-cloudsql-cloud-google-com-v1-authproxyworkload failurePolicy: Fail name: vauthproxyworkload.kb.io rules: - apiGroups: - cloudsql.cloud.google.com apiVersions: - - v1alpha1 + - v1 operations: - CREATE - UPDATE diff --git a/installer/install.sh b/installer/install.sh index d664a8c8..549376e1 100644 --- a/installer/install.sh +++ b/installer/install.sh @@ -16,11 +16,11 @@ set -euxo # exit 1 from the script when command fails -# If CSQL_OPERATOR_VERSION is not set, use the release version: v0.3.0. -CSQL_OPERATOR_VERSION="${CSQL_OPERATOR_VERSION:-v0.3.0}" +# If CSQL_OPERATOR_VERSION is not set, use the release version: v0.4.0. +CSQL_OPERATOR_VERSION="${CSQL_OPERATOR_VERSION:-v0.4.0}" -# If CSQL_CERT_MANAGER_VERSION is not set, use the default: v1.9.1. -CSQL_CERT_MANAGER_VERSION="${CSQL_CERT_MANAGER_VERSION:-v1.9.1}" +# If CSQL_CERT_MANAGER_VERSION is not set, use the default: v1.11.0. 
+CSQL_CERT_MANAGER_VERSION="${CSQL_CERT_MANAGER_VERSION:-v1.11.0}" # If CSQL_OPERATOR_URL is not set, use the default value from the CSQL_OPERATOR_VERSION CSQL_OPERATOR_URL="${CSQL_OPERATOR_URL:-https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/$CSQL_OPERATOR_VERSION/cloud-sql-proxy-operator.yaml}" diff --git a/internal/api/v1/authproxyworkload_test.go b/internal/api/v1/authproxyworkload_test.go new file mode 100644 index 00000000..61e76868 --- /dev/null +++ b/internal/api/v1/authproxyworkload_test.go @@ -0,0 +1,560 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1_test + +import ( + "testing" + + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func ptr[T int | int32 | int64 | string | bool](i T) *T { + return &i +} + +func TestAuthProxyWorkload_ValidateCreate_InstanceSpec(t *testing.T) { + + data := []struct { + desc string + spec []cloudsqlapi.InstanceSpec + wantValid bool + }{ + { + desc: "Invalid, empty instances", + wantValid: false, + }, + { + desc: "Invalid, Instance configured without PortEnvName, Port, or UnixSocketPath", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + }}, + wantValid: false, + }, + { + desc: "Valid, Instance configured with UnixSocketPath", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + UnixSocketPath: "/db/socket", + }}, + wantValid: true, + }, + { + desc: "Invalid, Instance configured with UnixSocketPath and Port", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + UnixSocketPath: "/db/socket", + Port: ptr(int32(2443)), + }}, + wantValid: false, + }, + { + desc: "Valid, Instance configured with valid port", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + Port: ptr(int32(2443)), + }}, + wantValid: true, + }, + { + desc: "Invalid, Instance configured with bad port", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + Port: ptr(int32(-22)), + }}, + wantValid: false, + }, + { + desc: "Invalid, Instance configured with bad portEnvName", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "22423!", + }}, + wantValid: false, + }, + { + desc: "Invalid, Instance configured with bad hostEnvName", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + HostEnvName: "22423!", + }}, + wantValid: false, + }, + { + desc: "Invalid, Instance configured with bad 
UnixSocketPathEnvName", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + UnixSocketPathEnvName: "22423!", + UnixSocketPath: "/db/socket", + }}, + wantValid: false, + }, + { + desc: "Invalid, Instance configured with bad relative UnixSocketPath", + spec: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + UnixSocketPath: "db/socket", + }}, + wantValid: false, + }, + } + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: tc.spec, + }, + } + p.Default() + err := p.ValidateCreate() + gotValid := err == nil + switch { + case tc.wantValid && !gotValid: + t.Errorf("wants create valid, got error %v", err) + printFieldErrors(t, err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on create, got no error") + default: + t.Logf("create passed %s", tc.desc) + // test passes, do nothing. 
+ } + }) + } + +} +func TestAuthProxyWorkload_ValidateCreate_WorkloadSpec(t *testing.T) { + data := []struct { + desc string + spec cloudsqlapi.WorkloadSelectorSpec + wantValid bool + }{ + { + desc: "Valid WorkloadSelectorSpec with Name", + spec: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + wantValid: true, + }, + { + desc: "Valid WorkloadSelectorSpec with Selector", + spec: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + wantValid: true, + }, + { + desc: "Invalid, both workload selector and name both set", + spec: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + wantValid: false, + }, + { + desc: "Invalid, WorkloadSelector missing name and selector", + spec: cloudsqlapi.WorkloadSelectorSpec{Kind: "Deployment"}, + wantValid: false, + }, + { + desc: "Valid, Instance configured with PortEnvName", + spec: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + wantValid: true, + }, + } + + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: tc.spec, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + Port: ptr(int32(2443)), + }}, + }, + } + p.Default() + err := p.ValidateCreate() + gotValid := err == nil + switch { + case tc.wantValid && !gotValid: + t.Errorf("wants create valid, got error %v", err) + printFieldErrors(t, err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on create, got no error") + default: + t.Logf("create passed %s", tc.desc) + // test passes, do nothing. 
+ } + }) + } +} +func TestAuthProxyWorkload_ValidateCreate_AuthProxyContainerSpec(t *testing.T) { + wantPort := int32(9393) + + data := []struct { + desc string + spec cloudsqlapi.AuthProxyContainerSpec + wantValid bool + }{ + + { + desc: "Valid, Debug and AdminPort set", + spec: cloudsqlapi.AuthProxyContainerSpec{ + AdminServer: &cloudsqlapi.AdminServerSpec{ + EnableAPIs: []string{"Debug"}, + Port: wantPort, + }, + }, + wantValid: true, + }, + { + desc: "Invalid, Debug set without AdminPort", + spec: cloudsqlapi.AuthProxyContainerSpec{ + AdminServer: &cloudsqlapi.AdminServerSpec{ + EnableAPIs: []string{"Debug"}, + }, + }, + wantValid: false, + }, + { + desc: "Invalid, EnableAPIs is empty", + spec: cloudsqlapi.AuthProxyContainerSpec{ + AdminServer: &cloudsqlapi.AdminServerSpec{ + Port: wantPort, + }, + }, + wantValid: false, + }, + { + desc: "Invalid, EnableAPIs is has a bad value", + spec: cloudsqlapi.AuthProxyContainerSpec{ + AdminServer: &cloudsqlapi.AdminServerSpec{ + EnableAPIs: []string{"Nope", "Debug"}, + Port: wantPort, + }, + }, + wantValid: false, + }, + } + + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + AuthProxyContainer: &tc.spec, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + Port: ptr(int32(2443)), + }}, + }, + } + p.Default() + err := p.ValidateCreate() + gotValid := err == nil + switch { + case tc.wantValid && !gotValid: + t.Errorf("wants create valid, got error %v", err) + printFieldErrors(t, err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on create, got no error") + default: + t.Logf("create passed %s", tc.desc) + // test passes, do nothing. 
+ } + }) + } +} + +func TestAuthProxyWorkload_ValidateUpdate(t *testing.T) { + data := []struct { + desc string + spec cloudsqlapi.AuthProxyWorkloadSpec + oldSpec cloudsqlapi.AuthProxyWorkloadSpec + wantValid bool + }{ + { + desc: "Valid, update adds another instance", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{ + { + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }, + { + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT2", + }, + }, + }, + wantValid: true, + }, + { + desc: "Invalid, WorkloadSelectorSpec.Kind changed", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "StatefulSet", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + wantValid: false, + }, + { + desc: "Invalid, WorkloadSelectorSpec.Name changed", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "things", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: 
"proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + wantValid: false, + }, + { + desc: "Invalid, WorkloadSelectorSpec.Selector changed", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "other"}, + }, + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + }, + } + + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: tc.spec, + } + oldP := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: tc.oldSpec, + } + p.Default() + oldP.Default() + + err := p.ValidateUpdate(&oldP) + gotValid := err == nil + + switch { + case tc.wantValid && !gotValid: + t.Errorf("wants update valid, got error %v", err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on update, got no error") + default: + t.Logf("update passed %s", tc.desc) + // test passes, do nothing. 
+ } + }) + } +} + +func TestAuthProxyWorkload_ValidateUpdate_AuthProxyContainerSpec(t *testing.T) { + data := []struct { + desc string + spec *cloudsqlapi.AuthProxyContainerSpec + oldSpec *cloudsqlapi.AuthProxyContainerSpec + wantValid bool + }{ + { + desc: "Invalid when AuthProxyContainerSpec.RolloutStrategy changes from explict to different default value", + spec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "None", + }, + oldSpec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "Workload", + }, + }, + { + desc: "Valid when AuthProxyContainerSpec.RolloutStrategy goes from default to same explicit value", + spec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "Workload", + }, + wantValid: true, + }, + { + desc: "Invalid when AuthProxyContainerSpec.RolloutStrategy changes from default to different explicit value", + spec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "None", + }, + wantValid: false, + }, + { + desc: "Invalid when AuthProxyContainerSpec.RolloutStrategy changes to different explicit value", + spec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "None", + }, + oldSpec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "Workload", + }, + wantValid: false, + }, + { + desc: "Invalid when AuthProxyContainerSpec.RolloutStrategy changes from explict to different default value", + spec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "None", + }, + oldSpec: &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: "Workload", + }, + wantValid: false, + }, + } + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + AuthProxyContainer: tc.spec, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: 
"proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + } + oldP := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + AuthProxyContainer: tc.oldSpec, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + } + p.Default() + oldP.Default() + + err := p.ValidateUpdate(&oldP) + gotValid := err == nil + + switch { + case tc.wantValid && !gotValid: + t.Errorf("wants update valid, got error %v", err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on update, got no error") + default: + t.Logf("update passed %s", tc.desc) + // test passes, do nothing. + } + }) + } + +} + +func printFieldErrors(t *testing.T, err error) { + t.Helper() + statusErr, ok := err.(*apierrors.StatusError) + if ok { + t.Errorf("Field status errors: ") + for _, v := range statusErr.Status().Details.Causes { + t.Errorf(" %v %v: %v ", v.Field, v.Type, v.Message) + } + } +} diff --git a/internal/api/v1alpha1/authproxyworkload_types.go b/internal/api/v1/authproxyworkload_types.go similarity index 65% rename from internal/api/v1alpha1/authproxyworkload_types.go rename to internal/api/v1/authproxyworkload_types.go index edcb89cd..a66e389f 100644 --- a/internal/api/v1alpha1/authproxyworkload_types.go +++ b/internal/api/v1/authproxyworkload_types.go @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package v1alpha1 +package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" ) @@ -76,27 +76,40 @@ const ( NoneStrategy = "None" ) -// AuthProxyWorkloadSpec defines the desired state of AuthProxyWorkload +// AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied +// to a matching set of workloads, and shows the status of those proxy containers. +// +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type AuthProxyWorkload struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AuthProxyWorkloadSpec `json:"spec,omitempty"` + Status AuthProxyWorkloadStatus `json:"status,omitempty"` +} + +// AuthProxyWorkloadSpec describes where and how to configure the proxy. type AuthProxyWorkloadSpec struct { - // Workload selects the workload to + // Workload selects the workload where the proxy container will be added. //+kubebuilder:validation:Required Workload WorkloadSelectorSpec `json:"workloadSelector"` - // AuthProxyContainer describes the resources and config for the Auth Proxy container - //+kubebuilder:validation:Optional - AuthProxyContainer *AuthProxyContainerSpec `json:"authProxyContainer,omitempty"` - - // Instances lists the Cloud SQL instances to connect + // Instances describes the Cloud SQL instances to configure on the proxy container. //+kubebuilder:validation:Required //+kubebuilder:validation:MinItems=1 Instances []InstanceSpec `json:"instances"` + + // AuthProxyContainer describes the resources and config for the Auth Proxy container. + //+kubebuilder:validation:Optional + AuthProxyContainer *AuthProxyContainerSpec `json:"authProxyContainer,omitempty"` } // WorkloadSelectorSpec describes which workloads should be configured with this -// proxy configuration. To be valid, WorkloadSelectorSpec must specify Kind -// and either Name or Selector. +// proxy configuration. 
To be valid, WorkloadSelectorSpec must specify `kind` +// and either `name` or `selector`. type WorkloadSelectorSpec struct { - // Selector selects resources using labels. See "Label selectors" in the kubernetes docs + // Selector (optional) selects resources using labels. See "Label selectors" in the kubernetes docs // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors //+kubebuilder:validation:Optional Selector *metav1.LabelSelector `json:"selector,omitempty"` @@ -124,32 +137,40 @@ func (s *WorkloadSelectorSpec) LabelsSelector() (labels.Selector, error) { return metav1.LabelSelectorAsSelector(s.Selector) } -// AuthProxyContainerSpec specifies configuration for the proxy container. +// AuthProxyContainerSpec describes how to configure global proxy configuration and +// kubernetes-specific container configuration. type AuthProxyContainerSpec struct { // Container is debugging parameter that when specified will override the // proxy container with a completely custom Container spec. //+kubebuilder:validation:Optional - Container *v1.Container `json:"container,omitempty"` + Container *corev1.Container `json:"container,omitempty"` // Resources specifies the resources required for the proxy pod. //+kubebuilder:validation:Optional - Resources *v1.ResourceRequirements `json:"resources,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` // Telemetry specifies how the proxy should expose telemetry. // Optional, by default //+kubebuilder:validation:Optional Telemetry *TelemetrySpec `json:"telemetry,omitempty"` + // AdminServer specifies the config for the proxy's admin service which is + // available to other containers in the same pod. + AdminServer *AdminServerSpec `json:"adminServer,omitempty"` + // MaxConnections limits the number of connections. Default value is no limit. 
// This sets the proxy container's CLI argument `--max-connections` //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum=0 MaxConnections *int64 `json:"maxConnections,omitempty"` - // MaxSigtermDelay is the maximum number of seconds to wait for connections to close after receiving a TERM signal. - // This sets the proxy container's CLI argument `--max-sigterm-delay` and + // MaxSigtermDelay is the maximum number of seconds to wait for connections to + // close after receiving a TERM signal. This sets the proxy container's + // CLI argument `--max-sigterm-delay` and // configures `terminationGracePeriodSeconds` on the workload's PodSpec. //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum=0 MaxSigtermDelay *int64 `json:"maxSigtermDelay,omitempty"` // SQLAdminAPIEndpoint is a debugging parameter that when specified will @@ -158,7 +179,17 @@ type AuthProxyContainerSpec struct { SQLAdminAPIEndpoint string `json:"sqlAdminAPIEndpoint,omitempty"` // Image is the URL to the proxy image. Optional, by default the operator - // will use the latest known compatible proxy image. + // will use the latest Cloud SQL Auth Proxy version as of the release of the + // operator. + // + // The operator ensures that all workloads configured with the default proxy + // image are upgraded automatically to use to the latest released proxy image. + // + // When the customer upgrades the operator, the operator upgrades all + // workloads using the default proxy image to the latest proxy image. The + // change to the proxy container image is applied in accordance with + // the RolloutStrategy. + // //+kubebuilder:validation:Optional Image string `json:"image,omitempty"` @@ -176,70 +207,118 @@ type AuthProxyContainerSpec struct { RolloutStrategy string `json:"rolloutStrategy,omitempty"` } +// AdminServerSpec specifies how to start the proxy's admin server: +// which port and whether to enable debugging or quitquitquit. 
It controls +// to the proxy's --admin-port, --debug, and --quitquitquit CLI flags. +type AdminServerSpec struct { + + // Port the port for the proxy's localhost-only admin server. + // This sets the proxy container's CLI argument `--admin-port` + //+kubebuilder:validation:required + //+kubebuilder:validation:Minimum=1 + Port int32 `json:"port,omitempty"` + + // EnableAPIs specifies the list of admin APIs to enable. At least one + // API must be enabled. Possible values: + // - "Debug" will enable pprof debugging by setting the `--debug` cli flag. + // - "QuitQuitQuit" will enable pprof debugging by setting the `--quitquitquit` + // cli flag. + //+kubebuilder:validation:MinItems:=1 + EnableAPIs []string `json:"enableAPIs,omitempty"` +} + // TelemetrySpec specifies how the proxy container will expose telemetry. type TelemetrySpec struct { + // QuotaProject Specifies the project to use for Cloud SQL Admin API quota tracking. + // The IAM principal must have the "serviceusage.services.use" permission + // for the given project. See https://cloud.google.com/service-usage/docs/overview and + // https://cloud.google.com/storage/docs/requester-pays + // This sets the proxy container's CLI argument `--quota-project` + //+kubebuilder:validation:Optional + QuotaProject *string `json:"quotaProject,omitempty"` + + // Prometheus Enables Prometheus HTTP endpoint /metrics on localhost + // This sets the proxy container's CLI argument `--prometheus` + //+kubebuilder:validation:Optional + Prometheus *bool `json:"prometheus,omitempty"` + + // PrometheusNamespace is used the provided Prometheus namespace for metrics + // This sets the proxy container's CLI argument `--prometheus-namespace` + //+kubebuilder:validation:Optional + PrometheusNamespace *string `json:"prometheusNamespace,omitempty"` + + // TelemetryProject enables Cloud Monitoring and Cloud Trace with the provided project ID. 
+ // This sets the proxy container's CLI argument `--telemetry-project` + //+kubebuilder:validation:Optional + TelemetryProject *string `json:"telemetryProject,omitempty"` + + // TelemetryPrefix is the prefix for Cloud Monitoring metrics. + // This sets the proxy container's CLI argument `--telemetry-prefix` + //+kubebuilder:validation:Optional + TelemetryPrefix *string `json:"telemetryPrefix,omitempty"` + + // TelemetrySampleRate is the Cloud Trace sample rate. A smaller number means more traces. + // This sets the proxy container's CLI argument `--telemetry-sample-rate` + //+kubebuilder:validation:Optional + TelemetrySampleRate *int `json:"telemetrySampleRate,omitempty"` + // HTTPPort the port for Prometheus and health check server. // This sets the proxy container's CLI argument `--http-port` //+kubebuilder:validation:Optional HTTPPort *int32 `json:"httpPort,omitempty"` + + // DisableTraces disables Cloud Trace testintegration (used with telemetryProject) + // This sets the proxy container's CLI argument `--disable-traces` + //+kubebuilder:validation:Optional + DisableTraces *bool `json:"disableTraces,omitempty"` + // DisableMetrics disables Cloud Monitoring testintegration (used with telemetryProject) + // This sets the proxy container's CLI argument `--disable-metrics` + //+kubebuilder:validation:Optional + DisableMetrics *bool `json:"disableMetrics,omitempty"` } // InstanceSpec describes the configuration for how the proxy should expose -// a Cloud SQL database instance to a workload. The simplest possible configuration -// declares just the connection string and the port number or unix socket. 
-// -// For example, for a TCP port: -// -// { "connectionString":"my-project:us-central1:my-db-server", "port":5000 } -// -// or for a unix socket: -// -// { "connectionString":"my-project:us-central1:my-db-server", -// "unixSocketPath" : "/mnt/db/my-db-server" } -// -// You may allow the operator to choose a non-conflicting TCP port or unix socket -// instead of explicitly setting the port or socket path. This may be easier to -// manage when workload needs to connect to many databases. -// -// For example, for a TCP port: +// a Cloud SQL database instance to a workload. // -// { "connectionString":"my-project:us-central1:my-db-server", -// "portEnvName":"MY_DB_SERVER_PORT" -// "hostEnvName":"MY_DB_SERVER_HOST" -// } +// In the minimum recommended configuration, the operator will choose +// a non-conflicting TCP port and set environment +// variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the value of the TCP port +// and hostname. The application can read these values to connect to the database +// through the proxy. For example: // -// will set environment variables MY_DB_SERVER_PORT MY_DB_SERVER_HOST with the -// value of the TCP port and hostname. Then, the application can read these values -// to connect to the database through the proxy. +// `{ +// "connectionString":"my-project:us-central1:my-db-server", +// "portEnvName":"MY_DB_SERVER_PORT" +// "hostEnvName":"MY_DB_SERVER_HOST" +// }` // -// or for a unix socket: +// If you want to assign a specific port number for a database, set the `port` +// field. For example: // -// { "connectionString":"my-project:us-central1:my-db-server", -// "unixSocketPathEnvName" : "MY_DB_SERVER_SOCKET_DIR" } -// -// will set environment variables MY_DB_SERVER_SOCKET_DIR with the -// value of the unix socket path. Then, the application can read this value -// to connect to the database through the proxy. 
+// `{ "connectionString":"my-project:us-central1:my-db-server", "port":5000 }` type InstanceSpec struct { - // ConnectionString is the Cloud SQL instance. + // ConnectionString is the connection string for the Cloud SQL Instance + // in the format `project_id:region:instance_name` //+kubebuilder:validation:Required + //+kubebuilder:validation:Pattern:="^([^:]+(:[^:]+)?):([^:]+):([^:]+)$" ConnectionString string `json:"connectionString,omitempty"` - // Port sets the tcp port for this instance. Optional, if not set, a value will + // Port (optional) sets the tcp port for this instance. If not set, a value will // be automatically assigned by the operator and set as an environment variable // on all containers in the workload named according to PortEnvName. The operator will choose // a port so that it does not conflict with other ports on the workload. //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=1 Port *int32 `json:"port,omitempty"` - // AutoIAMAuthN Enables IAM Authentication for this instance. Optional, default - // false. + // AutoIAMAuthN (optional) Enables IAM Authentication for this instance. + // Default value is false. //+kubebuilder:validation:Optional AutoIAMAuthN *bool `json:"autoIAMAuthN,omitempty"` - // PrivateIP Enable connection to the Cloud SQL instance's private ip for this instance. - // Optional, default false. + // PrivateIP (optional) Enable connection to the Cloud SQL instance's private ip for this instance. + // Default value is false. //+kubebuilder:validation:Optional PrivateIP *bool `json:"privateIP,omitempty"` @@ -267,7 +346,6 @@ type InstanceSpec struct { // AuthProxyWorkloadStatus presents the observed state of AuthProxyWorkload using // standard Kubernetes Conditions. type AuthProxyWorkloadStatus struct { - // Conditions show the overall status of the AuthProxyWorkload resource on all // matching workloads. 
// @@ -299,20 +377,6 @@ type WorkloadStatus struct { Conditions []*metav1.Condition `json:"conditions"` } -// AuthProxyWorkload declares how a Cloud SQL Proxy container should be applied -// to a matching set of workloads, and shows the status of those proxy containers. -// This is the Schema for the authproxyworkloads API. -// -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -type AuthProxyWorkload struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AuthProxyWorkloadSpec `json:"spec,omitempty"` - Status AuthProxyWorkloadStatus `json:"status,omitempty"` -} - // AuthProxyWorkloadList contains a list of AuthProxyWorkload and is part of the // authproxyworkloads API. // +kubebuilder:object:root=true diff --git a/internal/api/v1alpha1/authproxyworkload_webhook.go b/internal/api/v1/authproxyworkload_webhook.go similarity index 50% rename from internal/api/v1alpha1/authproxyworkload_webhook.go rename to internal/api/v1/authproxyworkload_webhook.go index 5f96d31d..4e5bd321 100644 --- a/internal/api/v1alpha1/authproxyworkload_webhook.go +++ b/internal/api/v1/authproxyworkload_webhook.go @@ -12,10 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package v1alpha1 +package v1 import ( "fmt" + "path" "reflect" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -23,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + apivalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -38,7 +40,7 @@ func (r *AuthProxyWorkload) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -// +kubebuilder:webhook:path=/mutate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload,mutating=true,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1alpha1,name=mauthproxyworkload.kb.io,admissionReviewVersions=v1 +// +kubebuilder:webhook:path=/mutate-cloudsql-cloud-google-com-v1-authproxyworkload,mutating=true,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1,name=mauthproxyworkload.kb.io,admissionReviewVersions=v1 var _ webhook.Defaulter = &AuthProxyWorkload{} // Default implements webhook.Defaulter so a webhook will be registered for the type @@ -50,7 +52,7 @@ func (r *AuthProxyWorkload) Default() { } } -// +kubebuilder:webhook:path=/validate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload,mutating=false,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1alpha1,name=vauthproxyworkload.kb.io,admissionReviewVersions=v1 +// +kubebuilder:webhook:path=/validate-cloudsql-cloud-google-com-v1-authproxyworkload,mutating=false,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1,name=vauthproxyworkload.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &AuthProxyWorkload{} // ValidateCreate 
implements webhook.Validator so a webhook will be registered for the type @@ -97,11 +99,49 @@ func (r *AuthProxyWorkload) validate() field.ErrorList { allErrs = append(allErrs, validation.ValidateLabelName(r.Name, field.NewPath("metadata", "name"))...) allErrs = append(allErrs, validateWorkload(&r.Spec.Workload, field.NewPath("spec", "workload"))...) + allErrs = append(allErrs, validateInstances(&r.Spec.Instances, field.NewPath("spec", "instances"))...) + allErrs = append(allErrs, validateContainer(r.Spec.AuthProxyContainer, field.NewPath("spec", "authProxyContainer"))...) return allErrs } +func validateContainer(spec *AuthProxyContainerSpec, f *field.Path) field.ErrorList { + if spec == nil { + return nil + } + + var allErrs field.ErrorList + if spec.AdminServer != nil { + if len(spec.AdminServer.EnableAPIs) == 0 { + allErrs = append(allErrs, field.Invalid( + f.Child("adminServer", "enableAPIs"), nil, + "enableAPIs must have at least one valid element: Debug or QuitQuitQuit")) + } + for i, v := range spec.AdminServer.EnableAPIs { + if v != "Debug" && v != "QuitQuitQuit" { + allErrs = append(allErrs, field.Invalid( + f.Child("adminServer", "enableAPIs", fmt.Sprintf("%d", i)), v, + "enableAPIs may contain the values \"Debug\" or \"QuitQuitQuit\"")) + } + } + } + if spec.AdminServer != nil { + errors := apivalidation.IsValidPortNum(int(spec.AdminServer.Port)) + for _, e := range errors { + allErrs = append(allErrs, field.Invalid( + f.Child("adminServer", "port"), + spec.AdminServer.Port, e)) + } + } + + return allErrs +} + +// validateUpdateFrom checks that an update to an AuthProxyWorkload resource +// adheres to these rules: +// - No changes to the workload selector +// - No changes to the RolloutStrategy func (r *AuthProxyWorkload) validateUpdateFrom(op *AuthProxyWorkload) field.ErrorList { var allErrs field.ErrorList @@ -121,7 +161,33 @@ func (r *AuthProxyWorkload) validateUpdateFrom(op *AuthProxyWorkload) field.Erro "selector cannot be changed on update")) } + 
allErrs = append(allErrs, validateRolloutStrategyChange(r.Spec.AuthProxyContainer, op.Spec.AuthProxyContainer)...) + + return allErrs +} + +// validateRolloutStrategyChange ensures that the rollout strategy does not +// change on update, taking default values into account. +func validateRolloutStrategyChange(c *AuthProxyContainerSpec, oc *AuthProxyContainerSpec) []*field.Error { + var allErrs field.ErrorList + var ( + s = WorkloadStrategy + os = WorkloadStrategy + ) + if c != nil && c.RolloutStrategy != "" { + s = c.RolloutStrategy + } + if oc != nil && oc.RolloutStrategy != "" { + os = oc.RolloutStrategy + } + if s != os { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec", "authProxyContainer", "rolloutStrategy"), s, + fmt.Sprintf("rolloutStrategy cannot be changed on update from %s", os))) + } + return allErrs + } func selectorNotEqual(s *metav1.LabelSelector, os *metav1.LabelSelector) bool { @@ -138,6 +204,11 @@ func selectorNotEqual(s *metav1.LabelSelector, os *metav1.LabelSelector) bool { var supportedKinds = []string{"CronJob", "Job", "StatefulSet", "Deployment", "DaemonSet", "ReplicaSet", "Pod"} +// validateWorkload ensures that the WorkloadSelectorSpec follows these rules: +// - Either Name or Selector is set +// - Kind is one of the supported kinds: "CronJob", "Job", "StatefulSet", +// "Deployment", "DaemonSet", "ReplicaSet", "Pod" +// - Selector is valid according to the k8s validation rules for LabelSelector func validateWorkload(spec *WorkloadSelectorSpec, f *field.Path) field.ErrorList { var errs field.ErrorList if spec.Selector != nil { @@ -170,3 +241,62 @@ func validateWorkload(spec *WorkloadSelectorSpec, f *field.Path) field.ErrorList return errs } + +// validateInstances ensures that InstanceSpec follows these rule: +// - There is at least 1 InstanceSpec +// - portEnvName, hostEnvName, and unixSocketPathEnvName have values that adhere +// to the standard k8s EnvName field validation. 
+// - Port has a valid port number according to the standard k8s Port field +// validation. +// - UnixSocketPath contains an absolute path. +// - The configuration clearly specifies either a TCP or a Unix socket but not +// both. +func validateInstances(spec *[]InstanceSpec, f *field.Path) field.ErrorList { + var errs field.ErrorList + if len(*spec) == 0 { + errs = append(errs, field.Invalid(f, + nil, + "at least one database instance must be declared")) + return errs + } + for i, inst := range *spec { + ff := f.Child(fmt.Sprintf("%d", i)) + if inst.Port != nil { + for _, s := range apivalidation.IsValidPortNum(int(*inst.Port)) { + errs = append(errs, field.Invalid(ff.Child("port"), inst.Port, s)) + } + } + errs = append(errs, validateEnvName(ff.Child("portEnvName"), + inst.PortEnvName)...) + errs = append(errs, validateEnvName(ff.Child("hostEnvName"), + inst.HostEnvName)...) + errs = append(errs, validateEnvName(ff.Child("unixSocketPathEnvName"), + inst.UnixSocketPathEnvName)...) + + if inst.UnixSocketPath != "" && !path.IsAbs(inst.UnixSocketPath) { + errs = append(errs, field.Invalid(ff.Child("unixSocketPath"), + inst.UnixSocketPath, "must be an absolute path")) + } + if inst.UnixSocketPath != "" && (inst.Port != nil || inst.PortEnvName != "") { + errs = append(errs, field.Invalid(ff.Child("unixSocketPath"), + inst.UnixSocketPath, + "unixSocketPath cannot be set when portEnvName or port are set. 
Databases can be configured to listen for either TCP or Unix socket connections, not both")) + } + if inst.UnixSocketPath == "" && inst.Port == nil && inst.PortEnvName == "" { + errs = append(errs, field.Invalid(f, + inst.UnixSocketPath, + "instance must specify at least one of the following: portEnvName, port, or unixSocketPath")) + } + } + return errs +} + +func validateEnvName(f *field.Path, envName string) field.ErrorList { + var errs field.ErrorList + if envName != "" { + for _, s := range apivalidation.IsEnvVarName(envName) { + errs = append(errs, field.Invalid(f, envName, s)) + } + } + return errs +} diff --git a/internal/api/v1alpha1/groupversion_info.go b/internal/api/v1/groupversion_info.go similarity index 81% rename from internal/api/v1alpha1/groupversion_info.go rename to internal/api/v1/groupversion_info.go index 2a163860..2f49af03 100644 --- a/internal/api/v1alpha1/groupversion_info.go +++ b/internal/api/v1/groupversion_info.go @@ -12,13 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package v1alpha1 contains API Schema definitions for the cloudsql v1alpha1 API group: -// the custom resource AuthProxyWorkload version v1alpha1 -// This follows the kubebuilder pattern for defining custom resources. +// Package v1 contains the API Schema definitions for the +// the custom resource AuthProxyWorkload version v1. 
// // +kubebuilder:object:generate=true // +groupName=cloudsql.cloud.google.com -package v1alpha1 +package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" @@ -27,7 +26,7 @@ import ( var ( // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "cloudsql.cloud.google.com", Version: "v1alpha1"} + GroupVersion = schema.GroupVersion{Group: "cloudsql.cloud.google.com", Version: "v1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} diff --git a/internal/api/v1alpha1/zz_generated.deepcopy.go b/internal/api/v1/zz_generated.deepcopy.go similarity index 80% rename from internal/api/v1alpha1/zz_generated.deepcopy.go rename to internal/api/v1/zz_generated.deepcopy.go index 97ec9810..ca207a68 100644 --- a/internal/api/v1alpha1/zz_generated.deepcopy.go +++ b/internal/api/v1/zz_generated.deepcopy.go @@ -17,14 +17,34 @@ // Code generated by controller-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminServerSpec) DeepCopyInto(out *AdminServerSpec) { + *out = *in + if in.EnableAPIs != nil { + in, out := &in.EnableAPIs, &out.EnableAPIs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminServerSpec. +func (in *AdminServerSpec) DeepCopy() *AdminServerSpec { + if in == nil { + return nil + } + out := new(AdminServerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AuthProxyContainerSpec) DeepCopyInto(out *AuthProxyContainerSpec) { *out = *in @@ -43,6 +63,11 @@ func (in *AuthProxyContainerSpec) DeepCopyInto(out *AuthProxyContainerSpec) { *out = new(TelemetrySpec) (*in).DeepCopyInto(*out) } + if in.AdminServer != nil { + in, out := &in.AdminServer, &out.AdminServer + *out = new(AdminServerSpec) + (*in).DeepCopyInto(*out) + } if in.MaxConnections != nil { in, out := &in.MaxConnections, &out.MaxConnections *out = new(int64) @@ -128,11 +153,6 @@ func (in *AuthProxyWorkloadList) DeepCopyObject() runtime.Object { func (in *AuthProxyWorkloadSpec) DeepCopyInto(out *AuthProxyWorkloadSpec) { *out = *in in.Workload.DeepCopyInto(&out.Workload) - if in.AuthProxyContainer != nil { - in, out := &in.AuthProxyContainer, &out.AuthProxyContainer - *out = new(AuthProxyContainerSpec) - (*in).DeepCopyInto(*out) - } if in.Instances != nil { in, out := &in.Instances, &out.Instances *out = make([]InstanceSpec, len(*in)) @@ -140,6 +160,11 @@ func (in *AuthProxyWorkloadSpec) DeepCopyInto(out *AuthProxyWorkloadSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AuthProxyContainer != nil { + in, out := &in.AuthProxyContainer, &out.AuthProxyContainer + *out = new(AuthProxyContainerSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProxyWorkloadSpec. @@ -157,11 +182,11 @@ func (in *AuthProxyWorkloadStatus) DeepCopyInto(out *AuthProxyWorkloadStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]*v1.Condition, len(*in)) + *out = make([]*metav1.Condition, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(v1.Condition) + *out = new(metav1.Condition) (*in).DeepCopyInto(*out) } } @@ -222,11 +247,51 @@ func (in *InstanceSpec) DeepCopy() *InstanceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TelemetrySpec) DeepCopyInto(out *TelemetrySpec) { *out = *in + if in.QuotaProject != nil { + in, out := &in.QuotaProject, &out.QuotaProject + *out = new(string) + **out = **in + } + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(bool) + **out = **in + } + if in.PrometheusNamespace != nil { + in, out := &in.PrometheusNamespace, &out.PrometheusNamespace + *out = new(string) + **out = **in + } + if in.TelemetryProject != nil { + in, out := &in.TelemetryProject, &out.TelemetryProject + *out = new(string) + **out = **in + } + if in.TelemetryPrefix != nil { + in, out := &in.TelemetryPrefix, &out.TelemetryPrefix + *out = new(string) + **out = **in + } + if in.TelemetrySampleRate != nil { + in, out := &in.TelemetrySampleRate, &out.TelemetrySampleRate + *out = new(int) + **out = **in + } if in.HTTPPort != nil { in, out := &in.HTTPPort, &out.HTTPPort *out = new(int32) **out = **in } + if in.DisableTraces != nil { + in, out := &in.DisableTraces, &out.DisableTraces + *out = new(bool) + **out = **in + } + if in.DisableMetrics != nil { + in, out := &in.DisableMetrics, &out.DisableMetrics + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetrySpec. 
@@ -244,7 +309,7 @@ func (in *WorkloadSelectorSpec) DeepCopyInto(out *WorkloadSelectorSpec) { *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } } @@ -264,11 +329,11 @@ func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]*v1.Condition, len(*in)) + *out = make([]*metav1.Condition, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(v1.Condition) + *out = new(metav1.Condition) (*in).DeepCopyInto(*out) } } diff --git a/internal/api/v1alpha1/authproxyworkload_test.go b/internal/api/v1alpha1/authproxyworkload_test.go deleted file mode 100644 index d6e2a645..00000000 --- a/internal/api/v1alpha1/authproxyworkload_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1alpha1_test - -import ( - "testing" - - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestAuthProxyWorkload_ValidateCreate(t *testing.T) { - data := []struct { - desc string - spec cloudsqlapi.AuthProxyWorkloadSpec - wantValid bool - }{ - { - desc: "Valid WorkloadSelectorSpec with Name", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - }, - }, - wantValid: true, - }, - { - desc: "Valid WorkloadSelectorSpec with Selector", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Selector: &v1.LabelSelector{ - MatchLabels: map[string]string{"app": "sample"}, - }, - }, - }, - wantValid: true, - }, - { - desc: "Invalid, both workload selector and name both set", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - Selector: &v1.LabelSelector{ - MatchLabels: map[string]string{"app": "sample"}, - }, - }, - }, - wantValid: false, - }, - { - desc: "Invalid, WorkloadSelector missing name and selector", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{Kind: "Deployment"}, - }, - wantValid: false, - }, - { - desc: "Valid, Instance configured with PortEnvName", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db2", - PortEnvName: "DB_PORT", - }}, - }, - wantValid: true, - }, - } - - for _, tc := range data { - t.Run(tc.desc, func(t *testing.T) { - p := cloudsqlapi.AuthProxyWorkload{ - ObjectMeta: v1.ObjectMeta{Name: "sample"}, - Spec: tc.spec, - } - err := p.ValidateCreate() - gotValid := err == nil - switch { 
- case tc.wantValid && !gotValid: - t.Errorf("wants create valid, got error %v", err) - printFieldErrors(t, err) - case !tc.wantValid && gotValid: - t.Errorf("wants an error on create, got no error") - default: - t.Logf("create passed %s", tc.desc) - // test passes, do nothing. - } - }) - } -} - -func TestAuthProxyWorkload_ValidateUpdate(t *testing.T) { - data := []struct { - desc string - spec cloudsqlapi.AuthProxyWorkloadSpec - oldSpec cloudsqlapi.AuthProxyWorkloadSpec - wantValid bool - }{ - { - desc: "Valid, update adds another instance", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db1", - PortEnvName: "DB_PORT", - }}, - }, - oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - }, - Instances: []cloudsqlapi.InstanceSpec{ - { - ConnectionString: "proj:region:db1", - PortEnvName: "DB_PORT", - }, - { - ConnectionString: "proj:region:db2", - PortEnvName: "DB_PORT2", - }, - }, - }, - wantValid: true, - }, - { - desc: "Invalid, WorkloadSelectorSpec.Kind changed", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db2", - PortEnvName: "DB_PORT", - }}, - }, - oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "StatefulSet", - Name: "webapp", - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db1", - PortEnvName: "DB_PORT", - }}, - }, - wantValid: false, - }, - { - desc: "Invalid, WorkloadSelectorSpec.Name changed", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "things", - }, - Instances: []cloudsqlapi.InstanceSpec{{ - 
ConnectionString: "proj:region:db2", - PortEnvName: "DB_PORT", - }}, - }, - oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Name: "webapp", - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db1", - PortEnvName: "DB_PORT", - }}, - }, - wantValid: false, - }, - { - desc: "Invalid, WorkloadSelectorSpec.Selector changed", - spec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Selector: &v1.LabelSelector{ - MatchLabels: map[string]string{"app": "sample"}, - }, - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db2", - PortEnvName: "DB_PORT", - }}, - }, - oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ - Workload: cloudsqlapi.WorkloadSelectorSpec{ - Kind: "Deployment", - Selector: &v1.LabelSelector{ - MatchLabels: map[string]string{"app": "other"}, - }, - }, - Instances: []cloudsqlapi.InstanceSpec{{ - ConnectionString: "proj:region:db1", - PortEnvName: "DB_PORT", - }}, - }, - wantValid: false, - }, - } - - for _, tc := range data { - t.Run(tc.desc, func(t *testing.T) { - p := cloudsqlapi.AuthProxyWorkload{ - ObjectMeta: v1.ObjectMeta{Name: "sample"}, - Spec: tc.spec, - } - oldP := cloudsqlapi.AuthProxyWorkload{ - ObjectMeta: v1.ObjectMeta{Name: "sample"}, - Spec: tc.oldSpec, - } - - err := p.ValidateUpdate(&oldP) - gotValid := err == nil - - switch { - case tc.wantValid && !gotValid: - t.Errorf("wants create valid, got error %v", err) - case !tc.wantValid && gotValid: - t.Errorf("wants an error on update, got no error") - default: - t.Logf("update passed %s", tc.desc) - // test passes, do nothing. 
- } - }) - } -} - -func printFieldErrors(t *testing.T, err error) { - t.Helper() - statusErr, ok := err.(*apierrors.StatusError) - if ok { - t.Errorf("Field status errors: ") - for _, v := range statusErr.Status().Details.Causes { - t.Errorf(" %v %v: %v ", v.Field, v.Type, v.Message) - } - } -} diff --git a/internal/controller/authproxyworkload_controller.go b/internal/controller/authproxyworkload_controller.go index d5961e89..6bf746dc 100644 --- a/internal/controller/authproxyworkload_controller.go +++ b/internal/controller/authproxyworkload_controller.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" "github.com/go-logr/logr" ) @@ -280,7 +280,7 @@ func (r *AuthProxyWorkloadReconciler) needsAnnotationUpdate(wl workload.Workload return false } - k, v := workload.PodAnnotation(resource) + k, v := r.updater.PodAnnotation(resource) // Check if the correct annotation exists an := wl.PodTemplateAnnotations() if an != nil && an[k] == v { @@ -304,7 +304,7 @@ func (r *AuthProxyWorkloadReconciler) updateAnnotation(wl workload.Workload, res return } - k, v := workload.PodAnnotation(resource) + k, v := r.updater.PodAnnotation(resource) // add the annotation if needed... 
an := wl.PodTemplateAnnotations() diff --git a/internal/controller/authproxyworkload_controller_test.go b/internal/controller/authproxyworkload_controller_test.go index c1ccac14..831275a5 100644 --- a/internal/controller/authproxyworkload_controller_test.go +++ b/internal/controller/authproxyworkload_controller_test.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/testhelpers" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" ) @@ -73,12 +74,12 @@ func TestReconcileDeleted(t *testing.T) { addFinalizers(p) addPodWorkload(p) - cb, err := clientBuilder() + cb, _, err := clientBuilder() if err != nil { t.Error(err) // shouldn't ever happen } c := cb.WithObjects(p).Build() - r, req, ctx := reconciler(p, c) + r, req, ctx := reconciler(p, c, workload.DefaultProxyImage) c.Delete(ctx, p) if err != nil { @@ -115,7 +116,7 @@ func TestReconcileState21ByName(t *testing.T) { addFinalizers(p) addPodWorkload(p) - _, _, err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) + _, _, err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, cloudsqlapi.ReasonNoWorkloadsFound) if err != nil { t.Fatal(err) } @@ -129,7 +130,7 @@ func TestReconcileState21BySelector(t *testing.T) { addFinalizers(p) addSelectorWorkload(p, "Pod", "app", "things") - _, _, err := runReconcileTestcase(p, []client.Object{p}, 
false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) + _, _, err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, cloudsqlapi.ReasonNoWorkloadsFound) if err != nil { t.Fatal(err) } @@ -140,7 +141,7 @@ func TestReconcileState32(t *testing.T) { const ( wantRequeue = true wantStatus = metav1.ConditionFalse - wantReason = v1alpha1.ReasonWorkloadNeedsUpdate + wantReason = cloudsqlapi.ReasonWorkloadNeedsUpdate labelK = "app" labelV = "things" ) @@ -153,7 +154,7 @@ func TestReconcileState32(t *testing.T) { addSelectorWorkload(p, "Deployment", labelK, labelV) // mimic a pod that was updated by the webhook - reqName := v1alpha1.AnnotationPrefix + "/" + p.Name + reqName := cloudsqlapi.AnnotationPrefix + "/" + p.Name pod := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "thing", @@ -191,7 +192,7 @@ func TestReconcileState32RolloutStrategyNone(t *testing.T) { const ( wantRequeue = false wantStatus = metav1.ConditionTrue - wantReason = v1alpha1.ReasonFinishedReconcile + wantReason = cloudsqlapi.ReasonFinishedReconcile labelK = "app" labelV = "things" ) @@ -200,8 +201,8 @@ func TestReconcileState32RolloutStrategyNone(t *testing.T) { Namespace: "default", Name: "test", }, "project:region:db") - p.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{ - RolloutStrategy: v1alpha1.NoneStrategy, + p.Spec.AuthProxyContainer = &cloudsqlapi.AuthProxyContainerSpec{ + RolloutStrategy: cloudsqlapi.NoneStrategy, } p.Generation = 2 addFinalizers(p) @@ -247,7 +248,7 @@ func TestReconcileState33(t *testing.T) { const ( wantRequeue = false wantStatus = metav1.ConditionTrue - wantReason = v1alpha1.ReasonFinishedReconcile + wantReason = cloudsqlapi.ReasonFinishedReconcile labelK = "app" labelV = "things" ) @@ -261,7 +262,7 @@ func TestReconcileState33(t *testing.T) { addSelectorWorkload(p, "Deployment", labelK, labelV) // mimic a pod that was updated by the webhook - reqName := v1alpha1.AnnotationPrefix + "/" + p.Name + reqName, reqVal := 
workload.PodAnnotation(p, workload.DefaultProxyImage) pod := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "thing", @@ -269,7 +270,7 @@ func TestReconcileState33(t *testing.T) { Labels: map[string]string{labelK: labelV}, }, Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{reqName: "1"}}, + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{reqName: reqVal}}, }}, } @@ -293,7 +294,7 @@ func TestReconcileDeleteUpdatesWorkload(t *testing.T) { addFinalizers(resource) addSelectorWorkload(resource, "Deployment", labelK, labelV) - k, v := workload.PodAnnotation(resource) + k, v := workload.PodAnnotation(resource, workload.DefaultProxyImage) // mimic a deployment that was updated by the webhook deployment := &appsv1.Deployment{ @@ -308,12 +309,12 @@ func TestReconcileDeleteUpdatesWorkload(t *testing.T) { } // Build a client with the resource and deployment - cb, err := clientBuilder() + cb, _, err := clientBuilder() if err != nil { t.Error(err) // shouldn't ever happen } c := cb.WithObjects(resource, deployment).Build() - r, req, ctx := reconciler(resource, c) + r, req, ctx := reconciler(resource, c, workload.DefaultProxyImage) // Delete the resource c.Delete(ctx, resource) @@ -360,15 +361,82 @@ func TestReconcileDeleteUpdatesWorkload(t *testing.T) { } -func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client.Object, wantRequeue bool, wantStatus metav1.ConditionStatus, wantReason string) (client.WithWatch, context.Context, error) { - cb, err := clientBuilder() +func TestWorkloadUpdatedAfterDefaultProxyImageChanged(t *testing.T) { + const ( + labelK = "app" + labelV = "things" + ) + resource := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "project:region:db") + resource.Generation = 1 + addFinalizers(resource) + addSelectorWorkload(resource, "Deployment", labelK, labelV) + + // Deployment annotation 
should be updated to this after reconcile: + _, wantV := workload.PodAnnotation(resource, "gcr.io/cloud-sql-connectors/cloud-sql-proxy:999.9.9") + + // mimic a deployment that was updated by the webhook + // annotate the deployment with the default image + k, v := workload.PodAnnotation(resource, "gcr.io/cloud-sql-connectors/cloud-sql-proxy:1.1.1") + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "thing", + Namespace: "default", + Labels: map[string]string{labelK: labelV}, + }, + Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{k: v}}, + }}, + } + + // Build a client with the resource and deployment + cb, _, err := clientBuilder() + if err != nil { + t.Error(err) // shouldn't ever happen + } + c := cb.WithObjects(resource, deployment).Build() + + // Create a reconciler with the default proxy image at a different version + r, req, ctx := reconciler(resource, c, "gcr.io/cloud-sql-connectors/cloud-sql-proxy:999.9.9") + + // Run Reconcile on the resource + res, err := r.Reconcile(ctx, req) + if err != nil { + t.Error(err) + } + + if !res.Requeue { + t.Errorf("got %v, want %v for requeue", res.Requeue, true) + } + + // Fetch the deployment and make sure the annotations show the + // deleted resource. 
+ d := &appsv1.Deployment{} + err = c.Get(ctx, types.NamespacedName{ + Namespace: deployment.GetNamespace(), + Name: deployment.GetName(), + }, d) + if err != nil { + t.Fatal(err) + } + + if got := d.Spec.Template.ObjectMeta.Annotations[k]; got != wantV { + t.Fatalf("got %v, wants annotation value %v", got, wantV) + } + +} + +func runReconcileTestcase(p *cloudsqlapi.AuthProxyWorkload, clientObjects []client.Object, wantRequeue bool, wantStatus metav1.ConditionStatus, wantReason string) (client.WithWatch, context.Context, error) { + cb, _, err := clientBuilder() if err != nil { return nil, nil, err // shouldn't ever happen } c := cb.WithObjects(clientObjects...).Build() - r, req, ctx := reconciler(p, c) + r, req, ctx := reconciler(p, c, workload.DefaultProxyImage) res, err := r.Reconcile(ctx, req) if err != nil { return nil, nil, err @@ -385,7 +453,7 @@ func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client. } if wantStatus != "" || wantReason != "" { - cond := findCondition(p.Status.Conditions, v1alpha1.ConditionUpToDate) + cond := findCondition(p.Status.Conditions, cloudsqlapi.ConditionUpToDate) if cond == nil { return nil, nil, fmt.Errorf("the UpToDate condition was nil, wants condition to exist") } @@ -400,29 +468,29 @@ func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client. 
return c, ctx, nil } -func clientBuilder() (*fake.ClientBuilder, error) { - scheme, err := v1alpha1.SchemeBuilder.Build() +func clientBuilder() (*fake.ClientBuilder, *runtime.Scheme, error) { + scheme, err := cloudsqlapi.SchemeBuilder.Build() if err != nil { - return nil, err + return nil, nil, err } err = corev1.AddToScheme(scheme) if err != nil { - return nil, err + return nil, nil, err } err = appsv1.AddToScheme(scheme) if err != nil { - return nil, err + return nil, nil, err } - return fake.NewClientBuilder().WithScheme(scheme), nil + return fake.NewClientBuilder().WithScheme(scheme), scheme, nil } -func reconciler(p *v1alpha1.AuthProxyWorkload, cb client.Client) (*AuthProxyWorkloadReconciler, ctrl.Request, context.Context) { +func reconciler(p *cloudsqlapi.AuthProxyWorkload, cb client.Client, defaultProxyImage string) (*AuthProxyWorkloadReconciler, ctrl.Request, context.Context) { ctx := log.IntoContext(context.Background(), logger) r := &AuthProxyWorkloadReconciler{ Client: cb, recentlyDeleted: &recentlyDeletedCache{}, - updater: workload.NewUpdater("cloud-sql-proxy-operator/dev"), + updater: workload.NewUpdater("cloud-sql-proxy-operator/dev", defaultProxyImage), } req := ctrl.Request{ NamespacedName: types.NamespacedName{ @@ -433,17 +501,17 @@ func reconciler(p *v1alpha1.AuthProxyWorkload, cb client.Client) (*AuthProxyWork return r, req, ctx } -func addFinalizers(p *v1alpha1.AuthProxyWorkload) { +func addFinalizers(p *cloudsqlapi.AuthProxyWorkload) { p.Finalizers = []string{finalizerName} } -func addPodWorkload(p *v1alpha1.AuthProxyWorkload) { - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ +func addPodWorkload(p *cloudsqlapi.AuthProxyWorkload) { + p.Spec.Workload = cloudsqlapi.WorkloadSelectorSpec{ Kind: "Pod", Name: "testpod", } } -func addSelectorWorkload(p *v1alpha1.AuthProxyWorkload, kind, labelK, labelV string) { - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ +func addSelectorWorkload(p *cloudsqlapi.AuthProxyWorkload, kind, labelK, labelV 
string) { + p.Spec.Workload = cloudsqlapi.WorkloadSelectorSpec{ Kind: kind, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{labelK: labelV}, diff --git a/internal/controller/pod_controller.go b/internal/controller/pod_controller.go index a3661e65..a1fd0dec 100644 --- a/internal/controller/pod_controller.go +++ b/internal/controller/pod_controller.go @@ -27,7 +27,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" ) @@ -49,44 +49,66 @@ func (a *PodAdmissionWebhook) InjectDecoder(d *admission.Decoder) error { // the proxy sidecars on all workloads to match the AuthProxyWorkload config. func (a *PodAdmissionWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { l := logf.FromContext(ctx) - wl := &workload.PodWorkload{ - Pod: &corev1.Pod{}, - } - err := a.decoder.Decode(req, wl.Object()) + p := corev1.Pod{} + err := a.decoder.Decode(req, &p) if err != nil { l.Info("/mutate-pod request can't be processed", "kind", req.Kind.Kind, "ns", req.Namespace, "name", req.Name) return admission.Errored(http.StatusInternalServerError, err) } + updatedPod, err := a.handleCreatePodRequest(ctx, p) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + + if updatedPod == nil { + return admission.Allowed("no changes to pod") + } + + // Marshal the updated Pod and prepare to send a response + marshaledRes, err := json.Marshal(updatedPod) + if err != nil { + l.Error(err, "Unable to marshal workload result in webhook", + "kind", req.Kind.Kind, "ns", req.Namespace, "name", req.Name) + return admission.Errored(http.StatusInternalServerError, + fmt.Errorf("unable to marshal workload result")) + } + + 
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledRes) +} + +// handleCreatePodRequest Finds relevant AuthProxyWorkload resources and updates the pod +// with matching resources, returning a non-nil pod when the pod was updated. +func (a *PodAdmissionWebhook) handleCreatePodRequest(ctx context.Context, p corev1.Pod) (*corev1.Pod, error) { var ( instList = &cloudsqlapi.AuthProxyWorkloadList{} proxies []*cloudsqlapi.AuthProxyWorkload wlConfigErr error + l = logf.FromContext(ctx) + wl = &workload.PodWorkload{Pod: &p} ) // List all the AuthProxyWorkloads in the same namespace. // To avoid privilege escalation, the operator requires that the AuthProxyWorkload // may only affect pods in the same namespace. - err = a.Client.List(ctx, instList, client.InNamespace(wl.Object().GetNamespace())) + err := a.Client.List(ctx, instList, client.InNamespace(wl.Object().GetNamespace())) if err != nil { l.Error(err, "Unable to list CloudSqlClient resources in webhook", - "kind", req.Kind.Kind, "ns", req.Namespace, "name", req.Name) - return admission.Errored(http.StatusInternalServerError, - fmt.Errorf("unable to list CloudSqlClient resources")) + "kind", wl.Pod.Kind, "ns", wl.Pod.Namespace, "name", wl.Pod.Name) + return nil, fmt.Errorf("unable to list AuthProxyWorkloads, %v", err) } // List the owners of this pod. 
owners, err := a.listOwners(ctx, wl.Object()) if err != nil { - return admission.Errored(http.StatusInternalServerError, - fmt.Errorf("there is an AuthProxyWorkloadConfiguration error reconciling this workload %v", err)) + return nil, fmt.Errorf("there is an AuthProxyWorkloadConfiguration error reconciling this workload %v", err) } // Find matching AuthProxyWorkloads for this pod proxies = a.updater.FindMatchingAuthProxyWorkloads(instList, wl, owners) if len(proxies) == 0 { - return admission.PatchResponseFromRaw(req.Object.Raw, req.Object.Raw) + return nil, nil // no change } // Configure the pod, adding containers for each of the proxies @@ -94,32 +116,11 @@ func (a *PodAdmissionWebhook) Handle(ctx context.Context, req admission.Request) if wlConfigErr != nil { l.Error(wlConfigErr, "Unable to reconcile workload result in webhook: "+wlConfigErr.Error(), - "kind", req.Kind.Kind, "ns", req.Namespace, "name", req.Name) - return admission.Errored(http.StatusInternalServerError, - fmt.Errorf("there is an AuthProxyWorkloadConfiguration error reconciling this workload %v", wlConfigErr)) - } - - // Log some information about the pod update - l.Info(fmt.Sprintf("Workload operation %s on kind %s named %s/%s required an update", - req.Operation, req.Kind, req.Namespace, req.Name)) - for _, inst := range proxies { - l.Info(fmt.Sprintf("inst %v %v/%v updated at instance resource version %v", - wl.Object().GetObjectKind().GroupVersionKind().String(), - wl.Object().GetNamespace(), wl.Object().GetName(), - inst.GetResourceVersion())) - } - - // Marshal the updated Pod and prepare to send a response - result := wl.Object() - marshaledRes, err := json.Marshal(result) - if err != nil { - l.Error(err, "Unable to marshal workload result in webhook", - "kind", req.Kind.Kind, "ns", req.Namespace, "name", req.Name) - return admission.Errored(http.StatusInternalServerError, - fmt.Errorf("unable to marshal workload result")) + "kind", wl.Pod.Kind, "ns", wl.Pod.Namespace, "name", 
wl.Pod.Name) + return nil, fmt.Errorf("there is an AuthProxyWorkloadConfiguration error reconciling this workload %v", wlConfigErr) } - return admission.PatchResponseFromRaw(req.Object.Raw, marshaledRes) + return wl.Pod, nil // updated } // listOwners returns the list of this object's owners and its extended owners. @@ -134,14 +135,14 @@ func (a *PodAdmissionWebhook) listOwners(ctx context.Context, object client.Obje wl, err := workload.WorkloadForKind(r.Kind) if err != nil { - owner = &metav1.PartialObjectMetadata{ - TypeMeta: metav1.TypeMeta{Kind: r.Kind, APIVersion: r.APIVersion}, - } - } else { - owners = append(owners, wl) - owner = wl.Object() + // If the operator doesn't recognize the owner's Kind, then ignore + // that owner. + continue } + owners = append(owners, wl) + owner = wl.Object() + err = a.Client.Get(ctx, key, owner) if err != nil { switch t := err.(type) { diff --git a/internal/controller/pod_controller_test.go b/internal/controller/pod_controller_test.go new file mode 100644 index 00000000..e5cbd2f6 --- /dev/null +++ b/internal/controller/pod_controller_test.go @@ -0,0 +1,158 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "context" + "testing" + + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" + "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/testhelpers" + "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +func TestPodWebhookWithDeploymentOwners(t *testing.T) { + _, scheme, err := clientBuilder() + if err != nil { + t.Fatal(err) + } + + // Proxy workload + p := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "project:region:db") + addFinalizers(p) + addSelectorWorkload(p, "Deployment", "app", "webapp") + + // Deployment that matches the proxy + dMatch := testhelpers.BuildDeployment(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "webapp") + dMatch.ObjectMeta.Labels = map[string]string{"app": "webapp"} + + // Deployment that does not match the proxy + dNoMatch := testhelpers.BuildDeployment(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "webapp") + dNoMatch.ObjectMeta.Labels = map[string]string{"app": "other"} + + // Deployment matches the proxy and is owned by another resource + // called CustomApp + dWithOwner := testhelpers.BuildDeployment(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "webapp") + dWithOwner.ObjectMeta.Labels = map[string]string{"app": "webapp"} + deploymentOwner := &v1.PartialObjectMetadata{ + TypeMeta: v1.TypeMeta{Kind: "CustomApp", APIVersion: "v1"}, + ObjectMeta: v1.ObjectMeta{Name: "custom-app", Namespace: "default"}, + } + err = controllerutil.SetOwnerReference(deploymentOwner, dWithOwner, scheme) + if err != 
nil { + t.Fatal(err) + } + + data := []struct { + name string + p *cloudsqlapi.AuthProxyWorkload + d *appsv1.Deployment + wantUpdate bool + }{ + { + name: "Deployment Pod with matching Workload", + p: p, + d: dMatch, + wantUpdate: true, + }, + { + name: "Deployment Pod with no match", + p: p, + d: dNoMatch, + wantUpdate: false, + }, + { + name: "Deployment Pod with unknown owner", + p: p, + d: dWithOwner, + wantUpdate: true, + }, + } + for _, tc := range data { + t.Run(tc.name, func(t *testing.T) { + cb, scheme, err := clientBuilder() + if err != nil { + t.Fatal(err) + } + + rs, hash, err := testhelpers.BuildDeploymentReplicaSet(tc.d, scheme) + if err != nil { + t.Fatal(err) + } + pods, err := testhelpers.BuildDeploymentReplicaSetPods(tc.d, rs, hash, scheme) + if err != nil { + t.Fatal(err) + } + + c := cb.WithObjects(p).WithObjects(rs).WithObjects(tc.d).Build() + wh, ctx, err := podWebhookController(c) + if err != nil { + t.Fatal(err) + } + + pod, errRes := wh.handleCreatePodRequest(ctx, *pods[0]) + + if errRes != nil { + t.Fatal("got error, want no error") + } + if tc.wantUpdate && pod == nil { + t.Fatal("got nil, want not nil workload indicating pod updates") + } + if !tc.wantUpdate && pod != nil { + t.Fatal("got non-nil workload, want nil indicating no pod updates") + } + + if err != nil { + t.Fatal(err) + } + + }) + } + +} + +func podWebhookController(cb client.Client) (*PodAdmissionWebhook, context.Context, error) { + ctx := log.IntoContext(context.Background(), logger) + d, err := admission.NewDecoder(cb.Scheme()) + if err != nil { + return nil, nil, err + } + r := &PodAdmissionWebhook{ + Client: cb, + decoder: d, + updater: workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage), + } + + return r, ctx, nil +} diff --git a/internal/controller/proxy_image_upgrade.go b/internal/controller/proxy_image_upgrade.go new file mode 100644 index 00000000..7ed9cedb --- /dev/null +++ b/internal/controller/proxy_image_upgrade.go @@ -0,0 +1,75 @@ 
+// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + "fmt" + + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// upgradeDefaultProxyOnStartup is a LeaderElectionRunnable task that will run +// as soon as ControllerRuntime is initialized. It will list all AuthProxyWorkload +// resources in the cluster, and force an update on all resources with the +// default proxy image. If the operator has a different DefaultProxyImage than +// the one used when that AuthProxyWorkload was last reconciled, then Reconcile +// will update the associated workloads in accordance with the RolloutStrategy. +type upgradeDefaultProxyOnStartup struct { + c client.Client +} + +// Start lists all the AuthProxyWorkload resources and triggers the update on +// the resources with a default proxy image. +func (c *upgradeDefaultProxyOnStartup) Start(ctx context.Context) error { + l := &cloudsqlapi.AuthProxyWorkloadList{} + + for { + select { + case <-ctx.Done(): + return nil + default: + // c.c.List() fills l with a paginated list of AuthProxyWorkloads. + // The token in l.Continue field is used to get the next page of the list. + // The for loop exits when l.Continue is blank, meaning no more pages. 
+ err := c.c.List(ctx, l, client.Continue(l.Continue)) + if err != nil { + return fmt.Errorf("can't list AuthProxyWorkload on startup, %v", err) + } + + for _, p := range l.Items { + useDefaultImage := p.Spec.AuthProxyContainer == nil || p.Spec.AuthProxyContainer.Image == "" + + if !useDefaultImage { + continue + } + + // If an APW has a default image, then perform an "update" on it so that + // the reconcile function runs and triggers the appropriate rolling updates. + log.FromContext(ctx).Info(fmt.Sprintf("Upgrading workload default images for %s/%s", p.Namespace, p.Name)) + err = c.c.Update(ctx, &p) + } + if l.Continue == "" { + return nil + } + } + } +} + +func (c *upgradeDefaultProxyOnStartup) NeedLeaderElection() bool { + return true // only run on the leader +} diff --git a/internal/controller/setup.go b/internal/controller/setup.go index 5b378d88..ac552159 100644 --- a/internal/controller/setup.go +++ b/internal/controller/setup.go @@ -15,7 +15,7 @@ package controller import ( - "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -32,14 +32,14 @@ var setupLog = ctrl.Log.WithName("setup") func InitScheme(scheme *runtime.Scheme) { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(cloudsqlapi.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } // SetupManagers was moved out of ../main.go to here so that it can be invoked // from the testintegration tests AND from the actual operator. 
-func SetupManagers(mgr manager.Manager, userAgent string) error { - u := workload.NewUpdater(userAgent) +func SetupManagers(mgr manager.Manager, userAgent, defaultProxyImage string) error { + u := workload.NewUpdater(userAgent, defaultProxyImage) setupLog.Info("Configuring reconcilers...") var err error @@ -54,7 +54,7 @@ func SetupManagers(mgr manager.Manager, userAgent string) error { return err } - wh := &v1alpha1.AuthProxyWorkload{} + wh := &cloudsqlapi.AuthProxyWorkload{} err = wh.SetupWebhookWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AuthProxyWorkload") @@ -72,6 +72,14 @@ func SetupManagers(mgr manager.Manager, userAgent string) error { return err } + // Add the runnable task that will upgrade the proxy image on workloads with + // default container image when the operator first starts. + err = mgr.Add(&upgradeDefaultProxyOnStartup{c: mgr.GetClient()}) + if err != nil { + setupLog.Error(err, "unable to start task to check all AuthProxyWorkloads on startup") + return err + } + setupLog.Info("Configuring reconcilers complete.") return nil } diff --git a/internal/testhelpers/resources.go b/internal/testhelpers/resources.go index 2bf66635..51160c1e 100644 --- a/internal/testhelpers/resources.go +++ b/internal/testhelpers/resources.go @@ -22,12 +22,13 @@ import ( "strconv" "time" - "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -345,15 +346,15 @@ func (cc *TestCaseClient) CreateWorkload(ctx context.Context, o client.Object) e // 
GetAuthProxyWorkloadAfterReconcile finds an AuthProxyWorkload resource named key, waits for its // "UpToDate" condition to be "True", and the returns it. Fails after 30 seconds // if the containers does not match. -func (cc *TestCaseClient) GetAuthProxyWorkloadAfterReconcile(ctx context.Context, key types.NamespacedName) (*v1alpha1.AuthProxyWorkload, error) { - createdPodmod := &v1alpha1.AuthProxyWorkload{} +func (cc *TestCaseClient) GetAuthProxyWorkloadAfterReconcile(ctx context.Context, key types.NamespacedName) (*cloudsqlapi.AuthProxyWorkload, error) { + createdPodmod := &cloudsqlapi.AuthProxyWorkload{} // We'll need to retry getting this newly created resource, given that creation may not immediately happen. err := RetryUntilSuccess(6, DefaultRetryInterval, func() error { err := cc.Client.Get(ctx, key, createdPodmod) if err != nil { return err } - if GetConditionStatus(createdPodmod.Status.Conditions, v1alpha1.ConditionUpToDate) != metav1.ConditionTrue { + if GetConditionStatus(createdPodmod.Status.Conditions, cloudsqlapi.ConditionUpToDate) != metav1.ConditionTrue { return errors.New("AuthProxyWorkload found, but reconcile not complete yet") } return nil @@ -527,6 +528,34 @@ func (cc *TestCaseClient) ExpectContainerCount(ctx context.Context, key types.Na // with the correct labels and ownership annotations as if it were in a live cluster. // This will make it easier to test and debug the behavior of our pod injection webhooks. 
func (cc *TestCaseClient) CreateDeploymentReplicaSetAndPods(ctx context.Context, d *appsv1.Deployment) (*appsv1.ReplicaSet, []*corev1.Pod, error) { + rs, hash, err := BuildDeploymentReplicaSet(d, cc.Client.Scheme()) + if err != nil { + return nil, nil, err + } + + err = cc.Client.Create(ctx, rs) + if err != nil { + return nil, nil, err + } + + pods, err := BuildDeploymentReplicaSetPods(d, rs, hash, cc.Client.Scheme()) + if err != nil { + return nil, nil, err + } + + for _, p := range pods { + err = cc.Client.Create(ctx, p) + if err != nil { + return rs, nil, err + } + } + return rs, pods, nil +} + +// BuildDeploymentReplicaSet mimics the behavior of the deployment controller +// built into kubernetes. It builds one ReplicaSet and DeploymentSpec.Replicas pods +// with the correct labels and ownership annotations as if it were in a live cluster. +func BuildDeploymentReplicaSet(d *appsv1.Deployment, scheme *runtime.Scheme) (*appsv1.ReplicaSet, string, error) { podTemplateHash := strconv.FormatUint(rand.Uint64(), 16) rs := &appsv1.ReplicaSet{ TypeMeta: metav1.TypeMeta{Kind: "ReplicaSet", APIVersion: "apps/metav1"}, @@ -551,16 +580,14 @@ func (cc *TestCaseClient) CreateDeploymentReplicaSetAndPods(ctx context.Context, Template: d.Spec.Template, }, } - - err := controllerutil.SetOwnerReference(d, rs, cc.Client.Scheme()) + err := controllerutil.SetOwnerReference(d, rs, scheme) if err != nil { - return nil, nil, err + return nil, "", err } + return rs, podTemplateHash, nil +} - err = cc.Client.Create(ctx, rs) - if err != nil { - return nil, nil, err - } +func BuildDeploymentReplicaSetPods(d *appsv1.Deployment, rs *appsv1.ReplicaSet, podTemplateHash string, scheme *runtime.Scheme) ([]*corev1.Pod, error) { var replicas int32 if d.Spec.Replicas != nil { @@ -585,23 +612,19 @@ func (cc *TestCaseClient) CreateDeploymentReplicaSetAndPods(ctx context.Context, }, Spec: d.Spec.Template.Spec, } - err = controllerutil.SetOwnerReference(rs, p, cc.Client.Scheme()) + err := 
controllerutil.SetOwnerReference(rs, p, scheme) if err != nil { - return rs, nil, err + return nil, err } - err = cc.Client.Create(ctx, p) - if err != nil { - return rs, nil, err - } pods = append(pods, p) } - return rs, pods, nil + return pods, nil } // BuildAuthProxyWorkload creates an AuthProxyWorkload object with a // single instance with a tcp connection. -func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) *v1alpha1.AuthProxyWorkload { +func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) *cloudsqlapi.AuthProxyWorkload { p := NewAuthProxyWorkload(key) AddTCPInstance(p, connectionString) return p @@ -609,8 +632,8 @@ func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) * // AddTCPInstance adds a database instance with a tcp connection, setting // HostEnvName to "DB_HOST" and PortEnvName to "DB_PORT". -func AddTCPInstance(p *v1alpha1.AuthProxyWorkload, connectionString string) { - p.Spec.Instances = append(p.Spec.Instances, v1alpha1.InstanceSpec{ +func AddTCPInstance(p *cloudsqlapi.AuthProxyWorkload, connectionString string) { + p.Spec.Instances = append(p.Spec.Instances, cloudsqlapi.InstanceSpec{ ConnectionString: connectionString, HostEnvName: "DB_HOST", PortEnvName: "DB_PORT", @@ -619,8 +642,8 @@ func AddTCPInstance(p *v1alpha1.AuthProxyWorkload, connectionString string) { // AddUnixInstance adds a database instance with a unix socket connection, // setting UnixSocketPathEnvName to "DB_PATH". 
-func AddUnixInstance(p *v1alpha1.AuthProxyWorkload, connectionString string, path string) { - p.Spec.Instances = append(p.Spec.Instances, v1alpha1.InstanceSpec{ +func AddUnixInstance(p *cloudsqlapi.AuthProxyWorkload, connectionString string, path string) { + p.Spec.Instances = append(p.Spec.Instances, cloudsqlapi.InstanceSpec{ ConnectionString: connectionString, UnixSocketPath: path, UnixSocketPathEnvName: "DB_PATH", @@ -629,10 +652,10 @@ func AddUnixInstance(p *v1alpha1.AuthProxyWorkload, connectionString string, pat // NewAuthProxyWorkload creates a new AuthProxyWorkload with the // TypeMeta, name and namespace set. -func NewAuthProxyWorkload(key types.NamespacedName) *v1alpha1.AuthProxyWorkload { - return &v1alpha1.AuthProxyWorkload{ +func NewAuthProxyWorkload(key types.NamespacedName) *cloudsqlapi.AuthProxyWorkload { + return &cloudsqlapi.AuthProxyWorkload{ TypeMeta: metav1.TypeMeta{ - APIVersion: v1alpha1.GroupVersion.String(), + APIVersion: cloudsqlapi.GroupVersion.String(), Kind: "AuthProxyWorkload", }, ObjectMeta: metav1.ObjectMeta{ @@ -643,7 +666,7 @@ func NewAuthProxyWorkload(key types.NamespacedName) *v1alpha1.AuthProxyWorkload } // CreateAuthProxyWorkload creates an AuthProxyWorkload in the kubernetes cluster. 
-func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types.NamespacedName, appLabel string, connectionString string, kind string) (*v1alpha1.AuthProxyWorkload, error) { +func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types.NamespacedName, appLabel string, connectionString string, kind string) (*cloudsqlapi.AuthProxyWorkload, error) { p := NewAuthProxyWorkload(key) AddTCPInstance(p, connectionString) cc.ConfigureSelector(p, appLabel, kind) @@ -653,8 +676,8 @@ func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types // ConfigureSelector Configures the workload selector on AuthProxyWorkload to use the label selector // "app=${appLabel}" -func (cc *TestCaseClient) ConfigureSelector(proxy *v1alpha1.AuthProxyWorkload, appLabel string, kind string) { - proxy.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ +func (cc *TestCaseClient) ConfigureSelector(proxy *cloudsqlapi.AuthProxyWorkload, appLabel string, kind string) { + proxy.Spec.Workload = cloudsqlapi.WorkloadSelectorSpec{ Kind: kind, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": appLabel}, @@ -663,8 +686,8 @@ func (cc *TestCaseClient) ConfigureSelector(proxy *v1alpha1.AuthProxyWorkload, a } // ConfigureResources Configures resource requests -func (cc *TestCaseClient) ConfigureResources(proxy *v1alpha1.AuthProxyWorkload) { - proxy.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{ +func (cc *TestCaseClient) ConfigureResources(proxy *cloudsqlapi.AuthProxyWorkload) { + proxy.Spec.AuthProxyContainer = &cloudsqlapi.AuthProxyContainerSpec{ Image: cc.ProxyImageURL, Resources: &corev1.ResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ @@ -674,7 +697,7 @@ func (cc *TestCaseClient) ConfigureResources(proxy *v1alpha1.AuthProxyWorkload) } } -func (cc *TestCaseClient) Create(ctx context.Context, proxy *v1alpha1.AuthProxyWorkload) error { +func (cc *TestCaseClient) Create(ctx context.Context, proxy 
*cloudsqlapi.AuthProxyWorkload) error { err := cc.Client.Create(ctx, proxy) if err != nil { return fmt.Errorf("Unable to create entity %v", err) diff --git a/internal/testhelpers/testcases.go b/internal/testhelpers/testcases.go index 10317ade..838c36b4 100644 --- a/internal/testhelpers/testcases.go +++ b/internal/testhelpers/testcases.go @@ -19,7 +19,7 @@ import ( "errors" "fmt" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" diff --git a/internal/testintegration/integration_test.go b/internal/testintegration/integration_test.go index 8181c927..ff49fdaa 100644 --- a/internal/testintegration/integration_test.go +++ b/internal/testintegration/integration_test.go @@ -18,12 +18,18 @@ package testintegration_test import ( + "context" + "fmt" "os" "testing" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/testhelpers" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/testintegration" + "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) func TestMain(m *testing.M) { @@ -47,7 +53,6 @@ func newTestCaseClient(name string) *testhelpers.TestCaseClient { Client: testintegration.Client, Namespace: testhelpers.NewNamespaceName(name), ConnectionString: "region:project:inst", - ProxyImageURL: "proxy-image:latest", } } @@ -178,26 +183,135 @@ func TestModifiesExistingDeployment(t *testing.T) { t.Fatal(err) } - // Then we simulate the deployment pods being replaced - err = tcc.Client.Delete(ctx, rs1) + err = recreatePodsAfterDeploymentUpdate(ctx, tcc, d, rs1, pods) + if err != nil { + t.Fatal(err) + } + // and check for 2 containers + err = 
tcc.ExpectPodContainerCount(ctx, d.Spec.Selector, 2, "all") if err != nil { t.Fatal(err) } +} - for i := 0; i < len(pods); i++ { - err = tcc.Client.Delete(ctx, pods[i]) - if err != nil { - t.Fatal(err) +// TestUpdateWorkloadContainerWhenDefaultProxyImageChanges is the test that +// demonstrates that when the operator's default image changes, it will +// automatically update the proxy container image on existing deployments. +func TestUpdateWorkloadContainerWhenDefaultProxyImageChanges(t *testing.T) { + ctx := testintegration.TestContext() + tcc := newTestCaseClient("modifynew") + + err := tcc.CreateOrPatchNamespace(ctx) + if err != nil { + t.Fatalf("can't create namespace, %v", err) + } + + const ( + pwlName = "newdeploy" + deploymentAppLabel = "busybox" + ) + key := types.NamespacedName{Name: pwlName, Namespace: tcc.Namespace} + + t.Log("Creating AuthProxyWorkload") + p, err := tcc.CreateAuthProxyWorkload(ctx, key, deploymentAppLabel, tcc.ConnectionString, "Deployment") + if err != nil { + t.Error(err) + return + } + + t.Log("Waiting for AuthProxyWorkload operator to begin the reconcile loop") + _, err = tcc.GetAuthProxyWorkloadAfterReconcile(ctx, key) + if err != nil { + t.Error("unable to create AuthProxyWorkload", err) + return + } + + t.Log("Creating deployment") + d := testhelpers.BuildDeployment(key, deploymentAppLabel) + err = tcc.CreateWorkload(ctx, d) + if err != nil { + t.Error("unable to create deployment", err) + return + } + + t.Log("Creating deployment replicas") + rs, pl, err := tcc.CreateDeploymentReplicaSetAndPods(ctx, d) + if err != nil { + t.Error("unable to create pods", err) + return + } + + // Check that proxy container was added to pods + err = tcc.ExpectPodContainerCount(ctx, d.Spec.Selector, 2, "all") + if err != nil { + t.Error(err) + } + + // Check that the pods have the expected default proxy image + pods, err := testhelpers.ListPods(ctx, tcc.Client, tcc.Namespace, d.Spec.Selector) + for _, p := range pods.Items { + if got, want := 
p.Spec.Containers[1].Image, workload.DefaultProxyImage; got != want { + t.Errorf("got %v, want %v image before operator upgrade", got, want) } } - _, _, err = tcc.CreateDeploymentReplicaSetAndPods(ctx, d) + + // Restart the manager with a new default proxy image + const newDefault = "gcr.io/cloud-sql-connectors/cloud-sql-proxy:999.9.9" + err = testintegration.RestartManager(newDefault) + if err != nil { + t.Fatal("can't restart container", err) + } + + // Get the related deployment. Make sure that annotations were + // set on the pod template + err = testhelpers.RetryUntilSuccess(24, testhelpers.DefaultRetryInterval, func() error { + ud := &appsv1.Deployment{} + err = tcc.Client.Get(ctx, client.ObjectKeyFromObject(d), ud) + wantK, wantV := workload.PodAnnotation(p, newDefault) + gotV := ud.Spec.Template.Annotations[wantK] + if gotV != wantV { + return fmt.Errorf("got %s, want %s for podspec annotation on deployment", gotV, wantV) + } + return nil + }) + if err != nil { t.Fatal(err) } - // and check for 2 containers - err = tcc.ExpectPodContainerCount(ctx, d.Spec.Selector, 2, "all") + // Recreate the ReplicaSet and Pods as would happen when the deployment + // PodTemplate changed. 
+ err = recreatePodsAfterDeploymentUpdate(ctx, tcc, d, rs, pl) + if err != nil { + t.Fatal(err) + } + + // Check that the new pods have the new default proxy image + pods, err = testhelpers.ListPods(ctx, tcc.Client, tcc.Namespace, d.Spec.Selector) + for _, p := range pods.Items { + if got, want := p.Spec.Containers[1].Image, newDefault; got != want { + t.Errorf("got %v, want %v image after operator upgrade", got, want) + } + } + +} + +func recreatePodsAfterDeploymentUpdate(ctx context.Context, tcc *testhelpers.TestCaseClient, d *appsv1.Deployment, rs1 *appsv1.ReplicaSet, pods []*corev1.Pod) error { + // Then we simulate the deployment pods being replaced + err := tcc.Client.Delete(ctx, rs1) + if err != nil { + return err + } + + for i := 0; i < len(pods); i++ { + err = tcc.Client.Delete(ctx, pods[i]) + if err != nil { + return err + } + } + _, _, err = tcc.CreateDeploymentReplicaSetAndPods(ctx, d) + if err != nil { + return err + } + return nil +} diff --git a/internal/testintegration/setup.go b/internal/testintegration/setup.go index a684026d..87403696 100644 --- a/internal/testintegration/setup.go +++ b/internal/testintegration/setup.go @@ -28,14 +28,16 @@ import ( "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/controller" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/testhelpers" + "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" "github.com/go-logr/logr" "go.uber.org/zap/zapcore" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) const kubeVersion = "1.24.1" @@ -51,6 +53,9 @@ var ( // Client is the kubernetes client. 
Client client.Client + + // RestartManager is the controller-runtime manager for the operator + RestartManager func(string) error ) // TestContext returns a background context that includes appropriate logging configuration. @@ -129,6 +134,23 @@ func EnvTestSetup() (func(), error) { return teardownFunc, fmt.Errorf("unable to start kuberenetes envtest %v", err) } + mgrCancelFunc, err := startManager(ctx, cfg, s, workload.DefaultProxyImage) + if err != nil { + return teardownFunc, fmt.Errorf("unable to start kuberenetes envtest %v", err) + } + + RestartManager = func(defaultProxyImage string) error { + mgrCancelFunc() + mgrCancelFunc, err = startManager(ctx, cfg, s, defaultProxyImage) + return err + } + + return teardownFunc, nil +} + +func startManager(ctx context.Context, cfg *rest.Config, s *runtime.Scheme, proxyImage string) (context.CancelFunc, error) { + ctx, managerCancelFunc := context.WithCancel(ctx) + // start webhook server using Manager o := &testEnv.WebhookInstallOptions mgr, err := ctrl.NewManager(cfg, ctrl.Options{ @@ -140,12 +162,13 @@ func EnvTestSetup() (func(), error) { MetricsBindAddress: "0", }) if err != nil { - return teardownFunc, fmt.Errorf("unable to start kuberenetes envtest %v", err) + return managerCancelFunc, fmt.Errorf("unable to start kuberenetes envtest %v", err) } - err = controller.SetupManagers(mgr, "cloud-sql-proxy-operator/dev") + err = controller.SetupManagers(mgr, "cloud-sql-proxy-operator/dev", proxyImage) + if err != nil { - return teardownFunc, fmt.Errorf("unable to start kuberenetes envtest %v", err) + return managerCancelFunc, fmt.Errorf("unable to start kuberenetes envtest %v", err) } go func() { @@ -177,5 +200,5 @@ func EnvTestSetup() (func(), error) { Log.Info("Setup complete. 
Webhook server started.") - return teardownFunc, nil + return managerCancelFunc, nil } diff --git a/internal/workload/names.go b/internal/workload/names.go index b3087a09..147a6b20 100644 --- a/internal/workload/names.go +++ b/internal/workload/names.go @@ -19,7 +19,7 @@ import ( "hash/fnv" "strings" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" ) // ContainerPrefix is the name prefix used on containers added to PodSpecs diff --git a/internal/workload/names_test.go b/internal/workload/names_test.go index 8adf2efc..db26a294 100644 --- a/internal/workload/names_test.go +++ b/internal/workload/names_test.go @@ -19,7 +19,7 @@ import ( "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" - "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" ) func TestSafePrefixedName(t *testing.T) { @@ -73,7 +73,7 @@ func TestSafePrefixedName(t *testing.T) { } func TestContainerName(t *testing.T) { - csql := authProxyWorkload("hello-world", []v1alpha1.InstanceSpec{{ConnectionString: "proj:inst:db"}}) + csql := authProxyWorkload("hello-world", []cloudsqlapi.InstanceSpec{{ConnectionString: "proj:inst:db"}}) got := workload.ContainerName(csql) want := "csql-default-hello-world" if want != got { @@ -82,7 +82,7 @@ func TestContainerName(t *testing.T) { } func TestVolumeName(t *testing.T) { - csql := authProxyWorkload("hello-world", []v1alpha1.InstanceSpec{{ConnectionString: "proj:inst:db"}}) + csql := authProxyWorkload("hello-world", []cloudsqlapi.InstanceSpec{{ConnectionString: "proj:inst:db"}}) got := workload.VolumeName(csql, &csql.Spec.Instances[0], "temp") want := "csql-hello-world-temp-proj-inst-db" if want != got { diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index 
4c0a90a5..b49ec8ff 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -29,14 +29,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" ) // Constants for well known error codes and defaults. These are exposed on the // package and documented here so that they appear in the godoc. These also // need to be documented in the CRD const ( - DefaultProxyImage = "gcr.io/cloud-sql-connectors/cloud-sql-proxy:2.1.0" + // DefaultProxyImage is the latest version of the proxy as of the release + // of this operator. This is managed as a dependency. We update this constant + // when the Cloud SQL Auth Proxy releases a new version. + DefaultProxyImage = "gcr.io/cloud-sql-connectors/cloud-sql-proxy:2.1.2" // DefaultFirstPort is the first port number chose for an instance listener by the // proxy. @@ -45,6 +48,10 @@ const ( // DefaultHealthCheckPort is the used by the proxy to expose prometheus // and kubernetes health checks. DefaultHealthCheckPort int32 = 9801 + + // DefaultAdminPort is the used by the proxy to expose prometheus + // and kubernetes health checks. + DefaultAdminPort int32 = 9802 ) var l = logf.Log.WithName("internal.workload") @@ -52,27 +59,42 @@ var l = logf.Log.WithName("internal.workload") // PodAnnotation returns the annotation (key, value) that should be added to // pods that are configured with this AuthProxyWorkload resource. This takes // into account whether the AuthProxyWorkload exists or was recently deleted. -func PodAnnotation(r *cloudsqlapi.AuthProxyWorkload) (string, string) { +// The defaultProxyImage is part of the annotation value. 
+func PodAnnotation(r *cloudsqlapi.AuthProxyWorkload, defaultProxyImage string) (string, string) { + img := defaultProxyImage + if r.Spec.AuthProxyContainer != nil && r.Spec.AuthProxyContainer.Image != "" { + img = "" + } k := fmt.Sprintf("%s/%s", cloudsqlapi.AnnotationPrefix, r.Name) - v := fmt.Sprintf("%d", r.Generation) + v := fmt.Sprintf("%d,%s", r.Generation, img) // if r was deleted, use a different value if !r.GetDeletionTimestamp().IsZero() { - v = fmt.Sprintf("%d-deleted-%s", r.Generation, r.GetDeletionTimestamp().Format(time.RFC3339)) + v = fmt.Sprintf("%d-deleted-%s,%s", r.Generation, r.GetDeletionTimestamp().Format(time.RFC3339), img) } return k, v } +// PodAnnotation returns the annotation (key, value) that should be added to +// pods that are configured with this AuthProxyWorkload resource. This takes +// into account whether the AuthProxyWorkload exists or was recently deleted. +func (u *Updater) PodAnnotation(r *cloudsqlapi.AuthProxyWorkload) (string, string) { + return PodAnnotation(r, u.defaultProxyImage) +} + // Updater holds global state used while reconciling workloads. 
type Updater struct { // userAgent is the userAgent of the operator userAgent string + + // defaultProxyImage is the current default proxy image for the operator + defaultProxyImage string } // NewUpdater creates a new instance of Updater with a supplier // that loads the default proxy impage from the public docker registry -func NewUpdater(userAgent string) *Updater { - return &Updater{userAgent: userAgent} +func NewUpdater(userAgent string, defaultProxyImage string) *Updater { + return &Updater{userAgent: userAgent, defaultProxyImage: defaultProxyImage} } // ConfigError is an error with extra details about why an AuthProxyWorkload @@ -462,7 +484,7 @@ func (s *updateState) update(wl *PodWorkload, matches []*cloudsqlapi.AuthProxyWo containers = append(containers, newContainer) // Add pod annotation for each instance - k, v := PodAnnotation(inst) + k, v := s.updater.PodAnnotation(inst) ann[k] = v } @@ -503,6 +525,10 @@ func (s *updateState) updateContainer(p *cloudsqlapi.AuthProxyWorkload, wl Workl // always enable http port healthchecks on 0.0.0.0 and structured logs s.addHealthCheck(p, c) + s.applyTelemetrySpec(p) + + // enable the proxy's admin service + s.addAdminServer(p) // add the user agent s.addProxyContainerEnvVar(p, "CSQL_PROXY_USER_AGENT", s.updater.userAgent) @@ -631,6 +657,42 @@ func (s *updateState) applyContainerSpec(p *cloudsqlapi.AuthProxyWorkload, c *co return } +// applyTelemetrySpec applies settings from cloudsqlapi.TelemetrySpec +// to the container +func (s *updateState) applyTelemetrySpec(p *cloudsqlapi.AuthProxyWorkload) { + if p.Spec.AuthProxyContainer == nil || p.Spec.AuthProxyContainer.Telemetry == nil { + return + } + tel := p.Spec.AuthProxyContainer.Telemetry + + if tel.TelemetrySampleRate != nil { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_TELEMETRY_SAMPLE_RATE", fmt.Sprintf("%d", *tel.TelemetrySampleRate)) + } + if tel.DisableTraces != nil && *tel.DisableTraces { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_DISABLE_TRACES", "true") + } 
+ if tel.DisableMetrics != nil && *tel.DisableMetrics { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_DISABLE_METRICS", "true") + } + if tel.PrometheusNamespace != nil || (tel.Prometheus != nil && *tel.Prometheus) { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_PROMETHEUS", "true") + } + if tel.PrometheusNamespace != nil { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_PROMETHEUS_NAMESPACE", *tel.PrometheusNamespace) + } + if tel.TelemetryProject != nil { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_TELEMETRY_PROJECT", *tel.TelemetryProject) + } + if tel.TelemetryPrefix != nil { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_TELEMETRY_PREFIX", *tel.TelemetryPrefix) + } + if tel.QuotaProject != nil { + s.addProxyContainerEnvVar(p, "CSQL_PROXY_QUOTA_PROJECT", *tel.QuotaProject) + } + + return +} + // updateContainerEnv applies global container state to all containers func (s *updateState) updateContainerEnv(c *corev1.Container) { for i := 0; i < len(s.mods.EnvVars); i++ { @@ -663,7 +725,7 @@ func (s *updateState) addHealthCheck(p *cloudsqlapi.AuthProxyWorkload, c *corev1 cs := p.Spec.AuthProxyContainer - // if the TelemetrySpec.exists, get Port values + // if the TelemetrySpec.exists, get Port and Port values if cs != nil && cs.Telemetry != nil { if cs.Telemetry.HTTPPort != nil { portPtr = cs.Telemetry.HTTPPort @@ -694,11 +756,30 @@ func (s *updateState) addHealthCheck(p *cloudsqlapi.AuthProxyWorkload, c *corev1 PeriodSeconds: 30, } // Add a port that is associated with the proxy, but not a specific db instance - s.addPort(port, proxyInstanceID{AuthProxyWorkload: types.NamespacedName{Namespace: p.Namespace, Name: p.Name}}) + s.addProxyPort(port, p) s.addProxyContainerEnvVar(p, "CSQL_PROXY_HTTP_PORT", fmt.Sprintf("%d", port)) s.addProxyContainerEnvVar(p, "CSQL_PROXY_HTTP_ADDRESS", "0.0.0.0") s.addProxyContainerEnvVar(p, "CSQL_PROXY_HEALTH_CHECK", "true") - return +} + +func (s *updateState) addAdminServer(p *cloudsqlapi.AuthProxyWorkload) { + + if p.Spec.AuthProxyContainer == nil || 
p.Spec.AuthProxyContainer.AdminServer == nil { + return + } + + cs := p.Spec.AuthProxyContainer.AdminServer + s.addProxyPort(cs.Port, p) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_ADMIN_PORT", fmt.Sprintf("%d", cs.Port)) + for _, name := range cs.EnableAPIs { + switch name { + case "Debug": + s.addProxyContainerEnvVar(p, "CSQL_PROXY_DEBUG", "true") + case "QuitQuitQuit": + s.addProxyContainerEnvVar(p, "CSQL_PROXY_QUITQUITQUIT", "true") + } + } + } func (s *updateState) addVolumeMount(p *cloudsqlapi.AuthProxyWorkload, is *cloudsqlapi.InstanceSpec, m corev1.VolumeMount, v corev1.Volume) { @@ -782,7 +863,7 @@ func (s *updateState) addError(errorCode, description string, p *cloudsqlapi.Aut } func (s *updateState) defaultProxyImage() string { - return DefaultProxyImage + return s.updater.defaultProxyImage } func (s *updateState) usePort(configValue *int32, defaultValue int32, p *cloudsqlapi.AuthProxyWorkload) int32 { diff --git a/internal/workload/podspec_updates_test.go b/internal/workload/podspec_updates_test.go index 85af0bfd..6720540b 100644 --- a/internal/workload/podspec_updates_test.go +++ b/internal/workload/podspec_updates_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -39,15 +39,15 @@ func podWorkload() *workload.PodWorkload { }} } -func simpleAuthProxy(name, connectionString string) *v1alpha1.AuthProxyWorkload { - return authProxyWorkload(name, []v1alpha1.InstanceSpec{{ +func simpleAuthProxy(name, connectionString string) *cloudsqlapi.AuthProxyWorkload { + return authProxyWorkload(name, []cloudsqlapi.InstanceSpec{{ ConnectionString: connectionString, }}) } -func authProxyWorkload(name string, instances []v1alpha1.InstanceSpec) 
*v1alpha1.AuthProxyWorkload { - return authProxyWorkloadFromSpec(name, v1alpha1.AuthProxyWorkloadSpec{ - Workload: v1alpha1.WorkloadSelectorSpec{ +func authProxyWorkload(name string, instances []cloudsqlapi.InstanceSpec) *cloudsqlapi.AuthProxyWorkload { + return authProxyWorkloadFromSpec(name, cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ Kind: "Deployment", Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "hello"}, @@ -56,13 +56,13 @@ func authProxyWorkload(name string, instances []v1alpha1.InstanceSpec) *v1alpha1 Instances: instances, }) } -func authProxyWorkloadFromSpec(name string, spec v1alpha1.AuthProxyWorkloadSpec) *v1alpha1.AuthProxyWorkload { - proxy := &v1alpha1.AuthProxyWorkload{ - TypeMeta: metav1.TypeMeta{Kind: "AuthProxyWorkload", APIVersion: v1alpha1.GroupVersion.String()}, +func authProxyWorkloadFromSpec(name string, spec cloudsqlapi.AuthProxyWorkloadSpec) *cloudsqlapi.AuthProxyWorkload { + proxy := &cloudsqlapi.AuthProxyWorkload{ + TypeMeta: metav1.TypeMeta{Kind: "AuthProxyWorkload", APIVersion: cloudsqlapi.GroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default", Generation: 1}, Spec: spec, } - proxy.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ + proxy.Spec.Workload = cloudsqlapi.WorkloadSelectorSpec{ Kind: "Deployment", Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "hello"}, @@ -122,8 +122,8 @@ func logPodSpec(t *testing.T, wl *workload.PodWorkload) { t.Logf("PodSpec: %s", string(podSpecYaml)) } -func configureProxies(u *workload.Updater, wl *workload.PodWorkload, proxies []*v1alpha1.AuthProxyWorkload) error { - l := &v1alpha1.AuthProxyWorkloadList{Items: make([]v1alpha1.AuthProxyWorkload, len(proxies))} +func configureProxies(u *workload.Updater, wl *workload.PodWorkload, proxies []*cloudsqlapi.AuthProxyWorkload) error { + l := &cloudsqlapi.AuthProxyWorkloadList{Items: make([]cloudsqlapi.AuthProxyWorkload, len(proxies))} for i := 0; 
i < len(proxies); i++ { l.Items[i] = *proxies[i] } @@ -139,7 +139,7 @@ func TestUpdatePodWorkload(t *testing.T) { wantContainerName = "csql-default-" + wantsName wantsInstanceName = "project:server:db" wantsInstanceArg = fmt.Sprintf("%s?port=%d", wantsInstanceName, wantsPort) - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) var err error @@ -157,7 +157,7 @@ func TestUpdatePodWorkload(t *testing.T) { proxy.Spec.Instances[0].Port = ptr(wantsPort) // Update the container with new markWorkloadNeedsUpdate - err = configureProxies(u, wl, []*v1alpha1.AuthProxyWorkload{proxy}) + err = configureProxies(u, wl, []*cloudsqlapi.AuthProxyWorkload{proxy}) if err != nil { t.Fatal(err) } @@ -194,7 +194,7 @@ func TestUpdateWorkloadFixedPort(t *testing.T) { "DB_HOST": "127.0.0.1", "DB_PORT": strconv.Itoa(int(wantsPort)), } - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) // Create a pod @@ -203,8 +203,8 @@ func TestUpdateWorkloadFixedPort(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{ - authProxyWorkload("instance1", []v1alpha1.InstanceSpec{{ + csqls := []*cloudsqlapi.AuthProxyWorkload{ + authProxyWorkload("instance1", []cloudsqlapi.InstanceSpec{{ ConnectionString: wantsInstanceName, Port: &wantsPort, PortEnvName: "DB_PORT", @@ -262,7 +262,7 @@ func TestWorkloadNoPortSet(t *testing.T) { "DB_PORT": strconv.Itoa(int(wantsPort)), } ) - u := workload.NewUpdater("cloud-sql-proxy-operator/dev") + u := workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) // Create a pod wl := podWorkload() @@ -270,8 +270,8 @@ func TestWorkloadNoPortSet(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that 
matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{ - authProxyWorkload("instance1", []v1alpha1.InstanceSpec{{ + csqls := []*cloudsqlapi.AuthProxyWorkload{ + authProxyWorkload("instance1", []cloudsqlapi.InstanceSpec{{ ConnectionString: wantsInstanceName, PortEnvName: "DB_PORT", HostEnvName: "DB_HOST", @@ -320,7 +320,7 @@ func TestContainerImageChanged(t *testing.T) { var ( wantsInstanceName = "project:server:db" wantImage = "custom-image:latest" - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) // Create a pod @@ -329,10 +329,10 @@ func TestContainerImageChanged(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{ + csqls := []*cloudsqlapi.AuthProxyWorkload{ simpleAuthProxy("instance1", wantsInstanceName), } - csqls[0].Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{Image: wantImage} + csqls[0].Spec.AuthProxyContainer = &cloudsqlapi.AuthProxyContainerSpec{Image: wantImage} // update the containers err := configureProxies(u, wl, csqls) @@ -362,13 +362,13 @@ func TestContainerImageEmpty(t *testing.T) { var ( wantsInstanceName = "project:server:db" wantImage = workload.DefaultProxyImage - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) // Create a AuthProxyWorkload that matches the deployment // create an AuthProxyContainer that has a value, but Image is empty. 
p1 := simpleAuthProxy("instance1", wantsInstanceName) - p1.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{MaxConnections: ptr(int64(5))} + p1.Spec.AuthProxyContainer = &cloudsqlapi.AuthProxyContainerSpec{MaxConnections: ptr(int64(5))} // create an AuthProxyContainer where AuthProxyContainer is nil p2 := simpleAuthProxy("instance1", wantsInstanceName) @@ -376,7 +376,7 @@ func TestContainerImageEmpty(t *testing.T) { tests := []struct { name string - proxy *v1alpha1.AuthProxyWorkload + proxy *cloudsqlapi.AuthProxyWorkload }{ {name: "Image is empty", proxy: p1}, {name: "AuthProxyContainer is nil", proxy: p2}, @@ -387,7 +387,7 @@ func TestContainerImageEmpty(t *testing.T) { wl := podWorkload() wl.Pod.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} - csqls := []*v1alpha1.AuthProxyWorkload{test.proxy} + csqls := []*cloudsqlapi.AuthProxyWorkload{test.proxy} // update the containers err := configureProxies(u, wl, csqls) @@ -421,7 +421,7 @@ func TestContainerReplaced(t *testing.T) { wantContainer = &corev1.Container{ Name: "sample", Image: "debian:latest", Command: []string{"/bin/bash"}, } - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) // Create a pod @@ -430,8 +430,8 @@ func TestContainerReplaced(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{simpleAuthProxy("instance1", wantsInstanceName)} - csqls[0].Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{Container: wantContainer} + csqls := []*cloudsqlapi.AuthProxyWorkload{simpleAuthProxy("instance1", wantsInstanceName)} + csqls[0].Spec.AuthProxyContainer = &cloudsqlapi.AuthProxyContainerSpec{Container: wantContainer} // update the containers err := configureProxies(u, wl, csqls) @@ -461,7 +461,7 @@ func TestContainerReplaced(t *testing.T) { } 
-func ptr[T int | int32 | int64 | string](i T) *T { +func ptr[T int | int32 | int64 | string | bool](i T) *T { return &i } @@ -475,7 +475,7 @@ func TestResourcesFromSpec(t *testing.T) { }, } - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) // Create a pod @@ -484,8 +484,8 @@ func TestResourcesFromSpec(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{simpleAuthProxy("instance1", wantsInstanceName)} - csqls[0].Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{Resources: wantResources} + csqls := []*cloudsqlapi.AuthProxyWorkload{simpleAuthProxy("instance1", wantsInstanceName)} + csqls[0].Spec.AuthProxyContainer = &cloudsqlapi.AuthProxyContainerSpec{Resources: wantResources} // update the containers err := configureProxies(u, wl, csqls) @@ -512,23 +512,23 @@ func TestResourcesFromSpec(t *testing.T) { } func TestProxyCLIArgs(t *testing.T) { - type testParam struct { - desc string - proxySpec v1alpha1.AuthProxyWorkloadSpec - wantProxyArgContains []string - wantErrorCodes []string - wantWorkloadEnv map[string]string - } wantTrue := true wantFalse := false var wantPort int32 = 5000 - var testcases = []testParam{ + testcases := []struct { + desc string + proxySpec cloudsqlapi.AuthProxyWorkloadSpec + wantProxyArgContains []string + wantErrorCodes []string + wantWorkloadEnv map[string]string + dontWantEnvSet []string + }{ { desc: "default cli config", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:db", Port: &wantPort, PortEnvName: "DB_PORT", @@ -544,8 +544,8 @@ func TestProxyCLIArgs(t *testing.T) { }, { desc: "port explicitly set", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: 
[]v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:db", Port: &wantPort, PortEnvName: "DB_PORT", @@ -555,8 +555,8 @@ func TestProxyCLIArgs(t *testing.T) { }, { desc: "port implicitly set and increments", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", PortEnvName: "DB_PORT", }, @@ -571,8 +571,8 @@ func TestProxyCLIArgs(t *testing.T) { }, { desc: "env name conflict causes error", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", PortEnvName: "DB_PORT", }, @@ -584,12 +584,12 @@ func TestProxyCLIArgs(t *testing.T) { wantProxyArgContains: []string{ fmt.Sprintf("hello:world:one?port=%d", workload.DefaultFirstPort), fmt.Sprintf("hello:world:two?port=%d", workload.DefaultFirstPort+1)}, - wantErrorCodes: []string{v1alpha1.ErrorCodeEnvConflict}, + wantErrorCodes: []string{cloudsqlapi.ErrorCodeEnvConflict}, }, { desc: "auto-iam-authn set", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", PortEnvName: "DB_PORT", AutoIAMAuthN: &wantTrue, @@ -606,8 +606,8 @@ func TestProxyCLIArgs(t *testing.T) { }, { desc: "private-ip set", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", PortEnvName: "DB_PORT", PrivateIP: &wantTrue, @@ -624,17 +624,30 @@ func TestProxyCLIArgs(t *testing.T) { }, { desc: "global flags", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - 
AuthProxyContainer: &v1alpha1.AuthProxyContainerSpec{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + AuthProxyContainer: &cloudsqlapi.AuthProxyContainerSpec{ SQLAdminAPIEndpoint: "https://example.com", - Telemetry: &v1alpha1.TelemetrySpec{ - HTTPPort: ptr(int32(9092)), + Telemetry: &cloudsqlapi.TelemetrySpec{ + TelemetryPrefix: ptr("telprefix"), + TelemetryProject: ptr("telproject"), + TelemetrySampleRate: ptr(200), + HTTPPort: ptr(int32(9092)), + DisableTraces: &wantTrue, + DisableMetrics: &wantTrue, + Prometheus: &wantTrue, + PrometheusNamespace: ptr("hello"), + QuotaProject: ptr("qp"), + }, + AdminServer: &cloudsqlapi.AdminServerSpec{ + EnableAPIs: []string{"Debug", "QuitQuitQuit"}, + Port: int32(9091), }, MaxConnections: ptr(int64(10)), MaxSigtermDelay: ptr(int64(20)), }, - Instances: []v1alpha1.InstanceSpec{{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", + Port: ptr(int32(5000)), }}, }, wantProxyArgContains: []string{ @@ -642,16 +655,44 @@ func TestProxyCLIArgs(t *testing.T) { }, wantWorkloadEnv: map[string]string{ "CSQL_PROXY_SQLADMIN_API_ENDPOINT": "https://example.com", + "CSQL_PROXY_TELEMETRY_SAMPLE_RATE": "200", + "CSQL_PROXY_PROMETHEUS_NAMESPACE": "hello", + "CSQL_PROXY_TELEMETRY_PROJECT": "telproject", + "CSQL_PROXY_TELEMETRY_PREFIX": "telprefix", "CSQL_PROXY_HTTP_PORT": "9092", + "CSQL_PROXY_ADMIN_PORT": "9091", + "CSQL_PROXY_DEBUG": "true", + "CSQL_PROXY_QUITQUITQUIT": "true", "CSQL_PROXY_HEALTH_CHECK": "true", + "CSQL_PROXY_DISABLE_TRACES": "true", + "CSQL_PROXY_DISABLE_METRICS": "true", + "CSQL_PROXY_PROMETHEUS": "true", + "CSQL_PROXY_QUOTA_PROJECT": "qp", "CSQL_PROXY_MAX_CONNECTIONS": "10", "CSQL_PROXY_MAX_SIGTERM_DELAY": "20", }, }, + { + desc: "No admin port enabled when AdminServerSpec is nil", + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + AuthProxyContainer: &cloudsqlapi.AuthProxyContainerSpec{}, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "hello:world:one", + Port: ptr(int32(5000)), + 
}}, + }, + wantProxyArgContains: []string{ + fmt.Sprintf("hello:world:one?port=%d", workload.DefaultFirstPort), + }, + wantWorkloadEnv: map[string]string{ + "CSQL_PROXY_HEALTH_CHECK": "true", + }, + dontWantEnvSet: []string{"CSQL_PROXY_DEBUG", "CSQL_PROXY_ADMIN_PORT"}, + }, { desc: "port conflict with other instance causes error", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", PortEnvName: "DB_PORT_1", Port: ptr(int32(8081)), @@ -665,12 +706,12 @@ func TestProxyCLIArgs(t *testing.T) { wantProxyArgContains: []string{ fmt.Sprintf("hello:world:one?port=%d", 8081), fmt.Sprintf("hello:world:two?port=%d", 8081)}, - wantErrorCodes: []string{v1alpha1.ErrorCodePortConflict}, + wantErrorCodes: []string{cloudsqlapi.ErrorCodePortConflict}, }, { desc: "port conflict with workload container", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - Instances: []v1alpha1.InstanceSpec{{ + proxySpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Instances: []cloudsqlapi.InstanceSpec{{ ConnectionString: "hello:world:one", PortEnvName: "DB_PORT_1", Port: ptr(int32(8080)), @@ -678,14 +719,13 @@ func TestProxyCLIArgs(t *testing.T) { }, wantProxyArgContains: []string{ fmt.Sprintf("hello:world:one?port=%d", 8080)}, - wantErrorCodes: []string{v1alpha1.ErrorCodePortConflict}, + wantErrorCodes: []string{cloudsqlapi.ErrorCodePortConflict}, }, } - for i := 0; i < len(testcases); i++ { - tc := &testcases[i] + for _, tc := range testcases { t.Run(tc.desc, func(t *testing.T) { - u := workload.NewUpdater("cloud-sql-proxy-operator/dev") + u := workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) // Create a pod wl := &workload.PodWorkload{Pod: &corev1.Pod{ @@ -698,7 +738,13 @@ func TestProxyCLIArgs(t *testing.T) { }} // Create a AuthProxyWorkload that matches the deployment - csqls := 
[]*v1alpha1.AuthProxyWorkload{authProxyWorkloadFromSpec("instance1", tc.proxySpec)} + csqls := []*cloudsqlapi.AuthProxyWorkload{authProxyWorkloadFromSpec("instance1", tc.proxySpec)} + + // ensure valid + err := csqls[0].ValidateCreate() + if err != nil { + t.Fatal("Invalid AuthProxyWorkload resource", err) + } // update the containers updateErr := configureProxies(u, wl, csqls) @@ -734,6 +780,13 @@ func TestProxyCLIArgs(t *testing.T) { t.Errorf("got %v, wants %v workload env var %v", gotEnvVar, wantValue, wantKey) } } + for _, dontWantKey := range tc.dontWantEnvSet { + gotEnvVar, err := findEnvVar(wl, csqlContainer.Name, dontWantKey) + if err != nil { + continue + } + t.Errorf("got env %v=%v, wants no env var set", dontWantKey, gotEnvVar) + } }) } @@ -804,11 +857,11 @@ func TestPodTemplateAnnotations(t *testing.T) { now = metav1.Now() wantAnnotations = map[string]string{ - "cloudsql.cloud.google.com/instance1": "1", - "cloudsql.cloud.google.com/instance2": "2", + "cloudsql.cloud.google.com/instance1": "1," + workload.DefaultProxyImage, + "cloudsql.cloud.google.com/instance2": "2," + workload.DefaultProxyImage, } - u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + u = workload.NewUpdater("cloud-sql-proxy-operator/dev", workload.DefaultProxyImage) ) // Create a pod @@ -817,7 +870,7 @@ func TestPodTemplateAnnotations(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{ + csqls := []*cloudsqlapi.AuthProxyWorkload{ simpleAuthProxy("instance1", "project:server:db"), simpleAuthProxy("instance2", "project:server2:db2"), simpleAuthProxy("instance3", "project:server3:db3")} @@ -842,12 +895,12 @@ func TestPodTemplateAnnotations(t *testing.T) { func TestPodAnnotation(t *testing.T) { now := metav1.Now() - server := &v1alpha1.AuthProxyWorkload{ObjectMeta: metav1.ObjectMeta{Name: "instance1", Generation: 1}} - deletedServer := 
&v1alpha1.AuthProxyWorkload{ObjectMeta: metav1.ObjectMeta{Name: "instance2", Generation: 2, DeletionTimestamp: &now}} + server := &cloudsqlapi.AuthProxyWorkload{ObjectMeta: metav1.ObjectMeta{Name: "instance1", Generation: 1}} + deletedServer := &cloudsqlapi.AuthProxyWorkload{ObjectMeta: metav1.ObjectMeta{Name: "instance2", Generation: 2, DeletionTimestamp: &now}} var testcases = []struct { name string - r *v1alpha1.AuthProxyWorkload + r *cloudsqlapi.AuthProxyWorkload wantK string wantV string }{ @@ -855,17 +908,17 @@ func TestPodAnnotation(t *testing.T) { name: "instance1", r: server, wantK: "cloudsql.cloud.google.com/instance1", - wantV: "1", + wantV: fmt.Sprintf("1,%s", workload.DefaultProxyImage), }, { name: "instance2", r: deletedServer, wantK: "cloudsql.cloud.google.com/instance2", - wantV: fmt.Sprintf("2-deleted-%s", now.Format(time.RFC3339)), + wantV: fmt.Sprintf("2-deleted-%s,%s", now.Format(time.RFC3339), workload.DefaultProxyImage), }, } for _, tc := range testcases { - gotK, gotV := workload.PodAnnotation(tc.r) + gotK, gotV := workload.PodAnnotation(tc.r, workload.DefaultProxyImage) if tc.wantK != gotK { t.Errorf("got %v, want %v for key", gotK, tc.wantK) } @@ -889,7 +942,7 @@ func TestWorkloadUnixVolume(t *testing.T) { wantWorkloadEnv = map[string]string{ "DB_SOCKET_PATH": wantsUnixSocketPath, } - u = workload.NewUpdater("authproxyworkload/dev") + u = workload.NewUpdater("authproxyworkload/dev", workload.DefaultProxyImage) ) // Create a pod @@ -898,8 +951,8 @@ func TestWorkloadUnixVolume(t *testing.T) { []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} // Create a AuthProxyWorkload that matches the deployment - csqls := []*v1alpha1.AuthProxyWorkload{ - authProxyWorkload("instance1", []v1alpha1.InstanceSpec{{ + csqls := []*cloudsqlapi.AuthProxyWorkload{ + authProxyWorkload("instance1", []cloudsqlapi.InstanceSpec{{ ConnectionString: wantsInstanceName, UnixSocketPath: wantsUnixSocketPath, UnixSocketPathEnvName: "DB_SOCKET_PATH", diff --git 
a/internal/workload/workload.go b/internal/workload/workload.go index fdec69cd..1b8b45c5 100644 --- a/internal/workload/workload.go +++ b/internal/workload/workload.go @@ -17,7 +17,7 @@ package workload import ( "fmt" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" diff --git a/internal/workload/workload_test.go b/internal/workload/workload_test.go index 2a12806a..19564141 100644 --- a/internal/workload/workload_test.go +++ b/internal/workload/workload_test.go @@ -18,7 +18,7 @@ import ( "os" "testing" - cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1" "go.uber.org/zap/zapcore" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" diff --git a/main.go b/main.go index ffefb148..4d0982f6 100644 --- a/main.go +++ b/main.go @@ -20,6 +20,8 @@ import ( "runtime" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/controller" + "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" @@ -87,7 +89,7 @@ func main() { os.Exit(1) } - err = controller.SetupManagers(mgr, userAgent) + err = controller.SetupManagers(mgr, userAgent, workload.DefaultProxyImage) if err != nil { setupLog.Error(err, "unable to set up the controllers") os.Exit(1) diff --git a/tests/e2e_test.go b/tests/e2e_test.go index 32a702e4..31754997 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -15,6 +15,7 @@ package tests import ( + "context" "fmt" "os" "strings" @@ -25,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" ) func TestMain(m *testing.M) { @@ -68,32 +68,32 @@ func TestProxyAppliedOnNewWorkload(t *testing.T) { tests := []struct { name string - o client.Object + o workload.Workload allOrAny string }{ { name: "deployment", - o: testhelpers.BuildDeployment(types.NamespacedName{}, "busybox"), + o: &workload.DeploymentWorkload{Deployment: testhelpers.BuildDeployment(types.NamespacedName{}, "busybox")}, allOrAny: "all", }, { name: "statefulset", - o: testhelpers.BuildStatefulSet(types.NamespacedName{}, "busybox"), + o: &workload.StatefulSetWorkload{StatefulSet: testhelpers.BuildStatefulSet(types.NamespacedName{}, "busybox")}, allOrAny: "all", }, { name: "daemonset", - o: testhelpers.BuildDaemonSet(types.NamespacedName{}, "busybox"), + o: &workload.DaemonSetWorkload{DaemonSet: testhelpers.BuildDaemonSet(types.NamespacedName{}, "busybox")}, allOrAny: "all", }, { name: "job", - o: testhelpers.BuildJob(types.NamespacedName{}, "busybox"), + o: &workload.JobWorkload{Job: testhelpers.BuildJob(types.NamespacedName{}, "busybox")}, allOrAny: "any", }, { name: "cronjob", - o: testhelpers.BuildCronJob(types.NamespacedName{}, "busybox"), + o: &workload.CronJobWorkload{CronJob: testhelpers.BuildCronJob(types.NamespacedName{}, "busybox")}, allOrAny: "any", }, } @@ -103,7 +103,7 @@ func TestProxyAppliedOnNewWorkload(t 
*testing.T) { t.Parallel() ctx := testContext() - kind := test.o.GetObjectKind().GroupVersionKind().Kind + kind := test.o.Object().GetObjectKind().GroupVersionKind().Kind tp := newPublicPostgresClient("new" + strings.ToLower(kind)) err := tp.CreateOrPatchNamespace(ctx) @@ -140,15 +140,11 @@ func TestProxyAppliedOnNewWorkload(t *testing.T) { } t.Log("Creating ", kind) - test.o.SetNamespace(tp.Namespace) - test.o.SetName(test.name) - err = tp.CreateWorkload(ctx, test.o) + err = createWorkload(ctx, tp, test.o, test.name) if err != nil { t.Fatal("unable to create ", kind, err) } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": appLabel}, - } + selector := appSelector(appLabel) t.Log("Checking for container counts", kind) err = tp.ExpectPodContainerCount(ctx, selector, 2, test.allOrAny) if err != nil { @@ -227,15 +223,11 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} t.Log("Creating ", kind) - test.o.Object().SetNamespace(tp.Namespace) - test.o.Object().SetName(test.name) - err = tp.CreateWorkload(ctx, test.o.Object()) + err = createWorkload(ctx, tp, test.o, test.name) if err != nil { t.Fatal("unable to create ", kind, err) } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": appLabel}, - } + selector := appSelector(appLabel) err = tp.ExpectPodContainerCount(ctx, selector, 1, test.allOrAny) if err != nil { @@ -373,15 +365,11 @@ func TestPublicDBConnections(t *testing.T) { } t.Log("Creating ", kind) - wl.Object().SetNamespace(tp.Namespace) - wl.Object().SetName(pwlName) - err = tp.CreateWorkload(ctx, wl.Object()) + err = createWorkload(ctx, tp, wl, pwlName) if err != nil { t.Fatal("unable to create ", kind, err) } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": appLabel}, - } + selector := appSelector(appLabel) t.Log("Checking for container counts", kind) err = tp.ExpectPodContainerCount(ctx, selector, 2, 
"all") if err != nil { @@ -454,15 +442,11 @@ func TestUpdateWorkloadOnDelete(t *testing.T) { // Create deployment t.Log("Creating ", kind) - o.SetNamespace(tp.Namespace) - o.SetName(name) - err = tp.CreateWorkload(ctx, o) + err = createWorkload(ctx, tp, wl, name) if err != nil { t.Fatal("unable to create ", kind, err) } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": appLabel}, - } + selector := appSelector(appLabel) // Check that the deployment pods are configured with the proxy: pods // have 2 containers. @@ -487,3 +471,104 @@ func TestUpdateWorkloadOnDelete(t *testing.T) { t.Error(err) } } + +func TestPrivateDBConnections(t *testing.T) { + // When running tests during development, set the SKIP_CLEANUP=true envvar so that + // the test namespace remains after the test ends. By default, the test + // namespace will be deleted when the test exits. + skipCleanup := loadValue("SKIP_CLEANUP", "", "false") == "true" + const ( + pwlName = "newss" + appLabel = "client" + kind = "Deployment" + allOrAny = "all" + ) + + ctx := testContext() + tp := newPrivatePostgresClient("postgresconn") + + err := tp.CreateOrPatchNamespace(ctx) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if skipCleanup { + return + } + + err = tp.DeleteNamespace(ctx) + if err != nil { + t.Fatal(err) + } + }) + + key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} + + s := testhelpers.BuildSecret("db-secret", tp.DBRootUsername, tp.DBRootPassword, tp.DBName) + s.SetNamespace(tp.Namespace) + err = tp.Client.Create(ctx, &s) + if err != nil { + t.Fatal(err) + } + + t.Log("Creating AuthProxyWorkload") + b := true + p := testhelpers.NewAuthProxyWorkload(key) + testhelpers.AddUnixInstance(p, tp.ConnectionString, "/var/tests/dbsocket") + tp.ConfigureSelector(p, appLabel, kind) + tp.ConfigureResources(p) + p.Spec.Instances[0].PrivateIP = &b + + err = tp.Create(ctx, p) + if err != nil { + t.Fatal(err) + } + + t.Log("Waiting for AuthProxyWorkload operator 
to begin the reconcile loop") + _, err = tp.GetAuthProxyWorkloadAfterReconcile(ctx, key) + if err != nil { + t.Fatal("unable to create AuthProxyWorkload", err) + } + + t.Log("Creating ", kind) + wl := &workload.DeploymentWorkload{Deployment: testhelpers.BuildDeployment(types.NamespacedName{}, appLabel)} + wl.Deployment.Spec.Template = testhelpers.BuildPgUnixPodSpec( + 600, appLabel, "db-secret") + err = createWorkload(ctx, tp, wl, pwlName) + if err != nil { + t.Fatal("unable to create ", kind, err) + } + + selector := appSelector(appLabel) + t.Log("Checking for container counts", kind) + err = tp.ExpectPodContainerCount(ctx, selector, 2, "all") + if err != nil { + t.Error(err) + } + + // The pods are configured to only be ready when the real database client + // successfully executes a simple query on the database. + t.Log("Checking for ready", kind) + err = tp.ExpectPodReady(ctx, selector, "all") + if err != nil { + t.Error(err) + } + + t.Log("Done, OK", kind) + +} + +// createWorkload will set name and namespace appropriately, then use the client +// to create the workload. +func createWorkload(ctx context.Context, tp *testhelpers.TestCaseClient, wl workload.Workload, name string) error { + wl.Object().SetNamespace(tp.Namespace) + wl.Object().SetName(name) + return tp.CreateWorkload(ctx, wl.Object()) +} + +// appSelector creates a label selector for "app={appLabel}". +func appSelector(appLabel string) *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": appLabel}, + } +} diff --git a/tests/setup_test.go b/tests/setup_test.go index 5b86bf4f..eecbbc73 100644 --- a/tests/setup_test.go +++ b/tests/setup_test.go @@ -47,23 +47,27 @@ var ( })) // These vars hold state initialized by SetupTests. 
- c client.Client + publicClient client.Client + privateClient client.Client infra testInfra proxyImageURL string operatorURL string ) +func newPrivatePostgresClient(ns string) *testhelpers.TestCaseClient { + return newTestClient(ns, infra.Private.Postgres, privateClient) +} func newPublicPostgresClient(ns string) *testhelpers.TestCaseClient { - return newTestClient(ns, infra.Public.Postgres) + return newTestClient(ns, infra.Public.Postgres, publicClient) } func newPublicMySQLClient(ns string) *testhelpers.TestCaseClient { - return newTestClient(ns, infra.Public.MySQL) + return newTestClient(ns, infra.Public.MySQL, publicClient) } func newPublicMSSQLClient(ns string) *testhelpers.TestCaseClient { - return newTestClient(ns, infra.Public.MSSQL) + return newTestClient(ns, infra.Public.MSSQL, publicClient) } -func newTestClient(ns string, db testDatabase) *testhelpers.TestCaseClient { +func newTestClient(ns string, db testDatabase, c client.Client) *testhelpers.TestCaseClient { return &testhelpers.TestCaseClient{ Client: c, Namespace: testhelpers.NewNamespaceName(ns), @@ -119,35 +123,43 @@ func setupTests() (func(), error) { } infra = ti - setupKubernetesClient(ctx, infra.Public) + publicClient, err = setupKubernetesClient(infra.Public) + privateClient, err = setupKubernetesClient(infra.Private) + waitForAndTailOperatorPods(ctx, publicClient) + waitForAndTailOperatorPods(ctx, privateClient) return cancelFunc, nil } -func setupKubernetesClient(ctx context.Context, ti testEnvironment) error { +func setupKubernetesClient(ti testEnvironment) (client.Client, error) { // Build the kubernetes client config, err := clientcmd.BuildConfigFromFlags("", ti.Kubeconfig) if err != nil { - return fmt.Errorf("unable to build kubernetes client for config %s, %v", ti.Kubeconfig, err) + return nil, fmt.Errorf("unable to build kubernetes client for config %s, %v", ti.Kubeconfig, err) } config.RateLimiter = nil k8sClientSet, err = kubernetes.NewForConfig(config) if err != nil { - return 
fmt.Errorf("unable to setup e2e kubernetes client %v", err) + return nil, fmt.Errorf("unable to setup e2e kubernetes client %v", err) } s := scheme.Scheme controller.InitScheme(s) - c, err = client.New(config, client.Options{Scheme: s}) + + c, err := client.New(config, client.Options{Scheme: s}) if err != nil { - return fmt.Errorf("Unable to initialize kubernetes client %{v}", err) + return nil, fmt.Errorf("Unable to initialize kubernetes client %{v}", err) } if c == nil { - return fmt.Errorf("Kubernetes client was empty after initialization %v", err) + return nil, fmt.Errorf("Kubernetes client was empty after initialization %v", err) } + return c, nil +} + +func waitForAndTailOperatorPods(ctx context.Context, c client.Client) error { // Check that the e2e k8s cluster is the operator that was last built from // this working directory. - d, err := waitForCorrectOperatorPods(ctx, err) + d, err := waitForCorrectOperatorPods(ctx, c) if err != nil { return fmt.Errorf("unable to find manager deployment %v", err) @@ -167,10 +179,10 @@ func setupKubernetesClient(ctx context.Context, ti testEnvironment) error { return nil } -func waitForCorrectOperatorPods(ctx context.Context, err error) (*appsv1.Deployment, error) { +func waitForCorrectOperatorPods(ctx context.Context, c client.Client) (*appsv1.Deployment, error) { deployment := &appsv1.Deployment{} managerDeploymentKey := client.ObjectKey{Namespace: "cloud-sql-proxy-operator-system", Name: "cloud-sql-proxy-operator-controller-manager"} - err = testhelpers.RetryUntilSuccess(5, testhelpers.DefaultRetryInterval, func() error { + err := testhelpers.RetryUntilSuccess(5, testhelpers.DefaultRetryInterval, func() error { // Fetch the deployment err := c.Get(ctx, managerDeploymentKey, deployment) if err != nil { @@ -273,7 +285,8 @@ type testDatabase struct { } type testInfra struct { - Public testEnvironment `json:"public"` + Public testEnvironment `json:"public"` + Private testEnvironment `json:"private"` } type testEnvironment 
struct { diff --git a/tools/config-crd-ref-docs.yaml b/tools/config-crd-ref-docs.yaml new file mode 100644 index 00000000..fa133d58 --- /dev/null +++ b/tools/config-crd-ref-docs.yaml @@ -0,0 +1,35 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## +# The configuration file for the crd-ref-docs tool. This tool generates API +# documentation in markdown for the AuthProxyWorkload CRD from the go source +# code. See the make target `generate_crd_docs`. + +processor: + # Ignore lists and status entities. We don't need to include those in the doc + ignoreTypes: + - "(AuthProxyWorkload)List$" + - "(AuthProxyWorkload)Status$" + # Ignore status TypeMeta fields + ignoreFields: + - "status$" + - "TypeMeta$" + # skip debug fields on AuthProxyContainerSpec + - "^Container$" + - "^SQLAdminAPIEndpoint" + useRawDocstring: false +render: + # Version of Kubernetes to use when generating links to Kubernetes API documentation. 
+ kubernetesVersion: 1.24 diff --git a/tools/e2e_test_job.sh b/tools/e2e_test_job.sh old mode 100644 new mode 100755 index d2c30954..17b2bb45 --- a/tools/e2e_test_job.sh +++ b/tools/e2e_test_job.sh @@ -20,61 +20,61 @@ echo "TIME: $(date) Begin Script" set -euxo -E2E_PROJECT_ID=cloud-sql-operator-testing - -echo "TIME: $(date) Install GCC" -# Install GCC and other essential build tools -apt-get update -apt-get install -y zip unzip build-essential - - -# Install and configure GCloud CLI -echo "TIME: $(date) Install GCloud CLI" mkdir -p bin -curl -L -o bin/gcloud-cli.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-413.0.0-linux-x86_64.tar.gz -( cd bin && tar -zxf gcloud-cli.tar.gz ) -./bin/google-cloud-sdk/bin/gcloud config set project "$E2E_PROJECT_ID" -./bin/google-cloud-sdk/bin/gcloud config set compute/zone "us-central1" -export PATH=$PATH:$PWD/bin/google-cloud-sdk/bin -which gcloud + +echo "Using installed gcloud" +gcloud version gcloud components install --quiet gke-gcloud-auth-plugin # Install helm -echo "TIME: $(date) Install Helm" -curl -L -o bin/helm.tar.gz https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz -( cd bin && tar -zxf helm.tar.gz && ls -al) -export PATH=$PATH:$PWD/bin/linux-amd64 -which helm +if ! 
which helm ; then + echo "TIME: $(date) Install Helm" + curl -L -o bin/helm.tar.gz https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz + ( cd bin && tar -zxf helm.tar.gz && ls -al) + export PATH=$PATH:$PWD/bin/linux-amd64 + which helm +else + echo "Using installed helm" + helm version +fi -# Install go -echo "TIME: $(date) Install Go" -curl -L -o bin/go.tar.gz https://go.dev/dl/go1.20.linux-amd64.tar.gz -rm -rf /usr/local/go && tar -C /usr/local -xzf bin/go.tar.gz -export PATH=$PATH:/usr/local/go/bin +echo "Using installed go" go version # Set the e2e test project id and other params from # the Cloud Build environment echo "TIME: $(date) Configure Make Env" +# Don't log the command, it contains secrets +set +x cat > build.env < bin/e2e_test.log 2>&1 ; then + echo "STATUS: E2E Test Passed" test_exit_code=0 else - echo "E2E Test Failed" + echo "STATUS: E2E Test Failed" test_exit_code=1 fi +if test -f bin/e2e_test.log ; then + # Upload full e2e log to the storage bucket + gcloud storage cp bin/e2e_test.log "gs://$TFSTATE_STORAGE_BUCKET/github-action-log/run-$GITHUB_RUN_ID.log" + echo "Uploaded full e2e log to /github-action-logs/run-$GITHUB_RUN_ID.log" + + # Print go test results to stdout, ignore error + egrep '(---)|(github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/tests)' bin/e2e_test.log || true +fi + echo "TIME: $(date) Done" exit $test_exit_code diff --git a/tools/helm-install-certmanager.sh b/tools/helm-install-certmanager.sh new file mode 100755 index 00000000..1b1465a1 --- /dev/null +++ b/tools/helm-install-certmanager.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Configure script to fail on any command error +set -euxo pipefail + +# Find project directory, cd to project directory +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +PROJECT_DIR=$( dirname "$SCRIPT_DIR") +cd "$PROJECT_DIR" + +# Validate input environment variables +#expects KUBECONFIG to be set by the caller +if [[ -z "${KUBECONFIG:-}" ]]; then + echo "expects KUBECONFIG to be the path to the kubeconfig file for kubectl." + exit 1 +fi + +#expects CERT_MANAGER_VERSION to be set by the caller +if [[ -z "${CERT_MANAGER_VERSION:-}" ]]; then + echo "expects CERT_MANAGER_VERSION to be set the version of cert manager to install." + exit 1 +fi + +helm repo add jetstack https://charts.jetstack.io --kubeconfig "${KUBECONFIG}" +helm repo update --kubeconfig "${KUBECONFIG}" + +if helm get all -n cert-manager cert-manager --kubeconfig "${KUBECONFIG}" > /dev/null ; then + action="upgrade" +else + action="install" +fi + +helm --kubeconfig "${KUBECONFIG}" "$action" \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version "${CERT_MANAGER_VERSION}" \ + --create-namespace \ + --set global.leaderElection.namespace=cert-manager \ + --set installCRDs=true diff --git a/version.txt b/version.txt index 0d91a54c..1d0ba9ea 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.3.0 +0.4.0