From 0f8009d621a75ab0a01421cd15cbc8a9d077d60d Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Wed, 18 Jan 2023 11:46:08 -0700 Subject: [PATCH 01/29] chore: consolidate lint and built jobs since they mostly overlap. (#177) This combines the lint and build jobs into a single build job. This will make our PR checks more reliable and probably faster too. Currently, when lint and built start at the same time, it overwhelms the github rate limit, causing one or both jobs to fail. By combining the jobs, we reduce the calls to github by half, which will solve the issue. --- .github/workflows/build.yaml | 2 ++ .github/workflows/lint.yaml | 56 ------------------------------------ 2 files changed, 2 insertions(+), 56 deletions(-) delete mode 100644 .github/workflows/lint.yaml diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ae953129..bdb11e27 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -50,5 +50,7 @@ jobs: repository: ${{ github.event.pull_request.head.repo.full_name }} - name: Set up build.env with phony secrets. run: cp build.sample.env build.env + - name: Verify no changes from code generation. If this check has failed, run `make generate` and commit the changes. + run: "make generate \nif ! git diff --exit-code --quiet ; then \n echo\n echo\n git diff --stat \"HEAD\"\n echo\n echo\n echo 'ERROR: Lint tools caused changes to the working dir. '\n exit 1\nfi\n" - name: make test run: make test diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml deleted file mode 100644 index 86a2bb80..00000000 --- a/.github/workflows/lint.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2022 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: lint -on: - pull_request: - pull_request_target: - types: [labeled] -jobs: - lint: - if: "${{ github.event.action != 'labeled' || github.event.label.name == 'tests: run' }}" - name: run lint - runs-on: ubuntu-latest - steps: - - name: Remove PR Label - if: "${{ github.event.action == 'labeled' && github.event.label.name == 'tests: run' }}" - uses: actions/github-script@v6 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - try { - await github.rest.issues.removeLabel({ - name: 'tests: run', - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.payload.pull_request.number - }); - } catch (e) { - console.log('Failed to remove label. Another job may have already removed it!'); - } - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.19' - - name: Install goimports - run: go install golang.org/x/tools/cmd/goimports@latest - - name: Checkout code - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - - name: Set up build.env with phony secrets. - run: cp build.sample.env build.env - - name: Verify no changes from lint tools. If you're reading this and the check has failed, run `make generate`. - run: "make generate \nif ! git diff --exit-code --quiet ; then \n echo\n echo\n git diff --stat \"HEAD\"\n echo\n echo\n echo 'ERROR: Lint tools caused changes to the working dir. 
'\n exit 1\nfi\n" From 7e07249aaf2e3c2780b9161fc4f1772e376ce5b3 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Wed, 18 Jan 2023 12:15:03 -0700 Subject: [PATCH 02/29] doc: update readme to point to latest release, update version next dev. (#176) --- README.md | 6 +++--- installer/cloud-sql-proxy-operator.yaml | 2 +- installer/install.sh | 4 ++-- version.txt | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 81e1e956..01bcd3cb 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ Run the following command to install the cloud sql proxy operator into your kubernetes cluster: ```shell -kubectl apply -f https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v0.1.0/cloud-sql-proxy-operator.yaml +kubectl apply -f https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v0.2.0/cloud-sql-proxy-operator.yaml ``` Confirm that the operator is installed and running by listing its pods: @@ -96,10 +96,10 @@ considered publicly unsupported. ## Contributing -Contributions are welcome. Please, see the [CONTRIBUTING][contributing] document +Contributions are welcome. Please, see the [Contributing](docs/contributing.md) document for details. Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. See -[Contributor Code of Conduct][code-of-conduct] for more information. +[Contributor Code of Conduct](docs/code-of-conduct.md) for more information. 
diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 3f50d2fb..a6aa0bf0 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -1361,7 +1361,7 @@ spec: - --leader-elect command: - /manager - image: gcr.io/cloud-sql-connectors/cloud-sql-operator/cloud-sql-proxy-operator:0.2.0 + image: gcr.io/cloud-sql-connectors/cloud-sql-operator/cloud-sql-proxy-operator:0.3.0-dev livenessProbe: httpGet: path: /healthz diff --git a/installer/install.sh b/installer/install.sh index 6b8f6297..c91dd2d4 100644 --- a/installer/install.sh +++ b/installer/install.sh @@ -16,8 +16,8 @@ set -euxo # exit 1 from the script when command fails -# If CSQL_OPERATOR_VERSION is not set, use the release version: v0.2.0. -CSQL_OPERATOR_VERSION="${CSQL_OPERATOR_VERSION:-v0.2.0}" +# If CSQL_OPERATOR_VERSION is not set, use the release version: v0.3.0-dev. +CSQL_OPERATOR_VERSION="${CSQL_OPERATOR_VERSION:-v0.3.0-dev}" # If CSQL_CERT_MANAGER_VERSION is not set, use the default: v1.9.1. 
CSQL_CERT_MANAGER_VERSION="${CSQL_CERT_MANAGER_VERSION:-v1.9.1}" diff --git a/version.txt b/version.txt index 0ea3a944..d5109100 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.2.0 +0.3.0-dev From 41a14d7bcd7351c63501429b21deccad9a25b89b Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 19 Jan 2023 15:32:30 +0000 Subject: [PATCH 03/29] chore(deps): update terraform google to v4.49.0 (#173) This PR contains the following updates: Package Type Update Change google (source) required_provider minor 4.48.0 -> 4.49.0 --- testinfra/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testinfra/main.tf b/testinfra/main.tf index 941a86bd..58dcafa2 100644 --- a/testinfra/main.tf +++ b/testinfra/main.tf @@ -18,7 +18,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "4.48.0" + version = "4.49.0" } } } From 8f43657a6e039db0e3c8c57be56ec8d68ee503e9 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Thu, 19 Jan 2023 16:01:03 -0700 Subject: [PATCH 04/29] feat: separate terraform for project setup and permissions (#179) This moves the terraform code that sets up the GCP project APIs and permissions into a separate terraform project. This will simplify the automated e2e testing jobs. Related to #65 --- infra/permissions/main.tf | 94 +++++++++++++++++++++++++++++++++++++++ infra/permissions/vars.tf | 43 ++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 infra/permissions/main.tf create mode 100644 infra/permissions/vars.tf diff --git a/infra/permissions/main.tf b/infra/permissions/main.tf new file mode 100644 index 00000000..a88e2d67 --- /dev/null +++ b/infra/permissions/main.tf @@ -0,0 +1,94 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_providers { + google = { + source = "hashicorp/google" + version = "4.48.0" + } + } +} + + +provider "google" { +} + +# Enable gcloud project APIs +locals { + project_services = toset([ + "compute.googleapis.com", + "container.googleapis.com", + "artifactregistry.googleapis.com", + "deploymentmanager.googleapis.com", + "dns.googleapis.com", + "logging.googleapis.com", + "monitoring.googleapis.com", + "oslogin.googleapis.com", + "pubsub.googleapis.com", + "replicapool.googleapis.com", + "replicapoolupdater.googleapis.com", + "resourceviews.googleapis.com", + "servicemanagement.googleapis.com", + "sql-component.googleapis.com", + "sqladmin.googleapis.com", + "storage-api.googleapis.com"]) +} + +resource "google_project_service" "project" { + for_each = local.project_services + project = var.project_id + service = each.value +} + +# Create service accounts for k8s workload nodes +resource "google_service_account" "node_pool" { + account_id = "k8s-nodes-${var.environment_name}" + display_name = "Kubernetes provider SA" + project = var.project_id +} +resource "google_project_iam_member" "allow_image_pull" { + project = var.project_id + role = "roles/artifactregistry.reader" + member = "serviceAccount:${google_service_account.node_pool.email}" +} + +resource "google_project_iam_binding" "cloud_sql_client" { + project = var.project_id + role = "roles/cloudsql.client" + members = [ + "serviceAccount:${google_service_account.node_pool.email}" + ] +} + +## +# This is how you do an output file containing terraform data for use 
by +# a subsequent script. + +# First, create the output data structure as a local variable +locals { + tf_output = { + project_id = var.project_id + environment_name = var.environment_name + nodepool_serviceaccount_email = google_service_account.node_pool.email + } +} + +# Then write the output data to a local file in json format +resource "local_file" "tf_output" { + content = jsonencode(local.tf_output) + filename = var.output_json_path +} diff --git a/infra/permissions/vars.tf b/infra/permissions/vars.tf new file mode 100644 index 00000000..0ae20627 --- /dev/null +++ b/infra/permissions/vars.tf @@ -0,0 +1,43 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + type = string + description = "The gcloud project id" +} + +variable "environment_name" { + type = string + description = "The name of the environment to create, a single gcp project can host many test environments" +} + +variable "output_json_path" { + type = string + description = "The path to save output.json file. 
This contains the values created by this project" +} + +variable "gcloud_bin" { + type = string + description = "The absolute path to the gcloud executable" +} + +variable "gcloud_zone" { + default = "us-central1-c" +} + +variable "gcloud_region" { + default = "us-central1" +} From 0140592b3a19087cc5ee769b542ae461f3a5d1b4 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Fri, 20 Jan 2023 14:06:17 -0700 Subject: [PATCH 05/29] feat: Add new terraform project for e2e test resources (#181) This is a copy of the resource provisioning code from testinfra/*.tf with this modification: IAM and permissions related terraform code is in the infra/permissions terraform project. Related to #65 --- .github/header-checker-lint.yml | 1 + infra/resources/.gitignore | 6 ++ infra/resources/artifacts.tf | 33 ++++++++ infra/resources/database.tf | 53 +++++++++++++ infra/resources/gke_cluster.tf | 133 ++++++++++++++++++++++++++++++++ infra/resources/main.tf | 56 ++++++++++++++ infra/resources/vars.tf | 69 +++++++++++++++++ 7 files changed, 351 insertions(+) create mode 100644 infra/resources/.gitignore create mode 100644 infra/resources/artifacts.tf create mode 100644 infra/resources/database.tf create mode 100644 infra/resources/gke_cluster.tf create mode 100644 infra/resources/main.tf create mode 100644 infra/resources/vars.tf diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index ece99198..68ddca6f 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -20,3 +20,4 @@ sourceFileExtensions: - 'go' - 'yaml' - 'yml' + - 'tf' diff --git a/infra/resources/.gitignore b/infra/resources/.gitignore new file mode 100644 index 00000000..4b4ceac5 --- /dev/null +++ b/infra/resources/.gitignore @@ -0,0 +1,6 @@ +.terraform +terraform.tfstate +terraform.tfstate.backup +.terraform.lock* +secrets.tfvars +.terraform.tfstate.lock.info diff --git a/infra/resources/artifacts.tf 
b/infra/resources/artifacts.tf new file mode 100644 index 00000000..bbf18513 --- /dev/null +++ b/infra/resources/artifacts.tf @@ -0,0 +1,33 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "google_artifact_registry_repository" "artifact_repo" { + location = var.gcloud_region + repository_id = "test${var.environment_name}" + description = "Operator test artifact repo" + format = "DOCKER" + project = var.project_id + labels = local.standard_labels +} + +// example: us-central1-docker.pkg.dev/csql-operator-test/test76e6d646e2caac1c458c +resource "local_file" "artifact_repo_url" { + content = join("/", [ + "${google_artifact_registry_repository.artifact_repo.location}-docker.pkg.dev", + google_artifact_registry_repository.artifact_repo.project, + google_artifact_registry_repository.artifact_repo.name]) + filename = var.gcloud_docker_url_file +} diff --git a/infra/resources/database.tf b/infra/resources/database.tf new file mode 100644 index 00000000..b653f16d --- /dev/null +++ b/infra/resources/database.tf @@ -0,0 +1,53 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "random_id" "db_name" { + byte_length = 10 +} + +resource "random_id" "db_password" { + byte_length = 10 +} + +# See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version +resource "google_sql_database_instance" "instance" { + name = "inst${random_id.db_name.hex}" + project = var.project_id + region = var.gcloud_region + database_version = "POSTGRES_13" + settings { + tier = "db-f1-micro" + user_labels = local.standard_labels + } + deletion_protection = "true" + root_password = random_id.db_password.hex +} + +resource "google_sql_database" "db" { + name = "db" + instance = google_sql_database_instance.instance.name + project = var.project_id +} + +output "db_root_password" { + value = random_id.db_password.hex +} +output "db_instance_name" { + value = google_sql_database_instance.instance.name +} +output "db_database_name" { + value = google_sql_database.db.name +} diff --git a/infra/resources/gke_cluster.tf b/infra/resources/gke_cluster.tf new file mode 100644 index 00000000..f20e218d --- /dev/null +++ b/infra/resources/gke_cluster.tf @@ -0,0 +1,133 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# From https://github.com/hashicorp/terraform-provider-kubernetes/blob/main/kubernetes/test-infra/gke/main.tf + +data "google_client_config" "default" { +} + +data "google_container_engine_versions" "supported" { + location = var.gcloud_zone + version_prefix = var.kubernetes_version + project = var.project_id +} + +resource "google_container_cluster" "primary" { + project = var.project_id + name = "operator-test-${var.environment_name}" + location = var.gcloud_zone + min_master_version = data.google_container_engine_versions.supported.latest_master_version + initial_node_count = 2 + + // Alpha features are disabled by default and can be enabled by GKE for a particular GKE control plane version. + // Creating an alpha cluster enables all alpha features by default. + // Ref: https://cloud.google.com/kubernetes-engine/docs/concepts/feature-gates + enable_kubernetes_alpha = var.enable_alpha + + // disalbe the default nodepool and specify node pools as + // separate terraform resources. 
This way if we + // change the nodepool config, we don't delete the cluster too + remove_default_node_pool = true + resource_labels = local.standard_labels + +} + +resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "operator-test-nodes-${var.environment_name}" + cluster = google_container_cluster.primary.id + initial_node_count = var.workers_count + version = data.google_container_engine_versions.supported.latest_node_version + location = var.gcloud_zone + + autoscaling { + max_node_count = 10 + min_node_count = 2 + } + + management { + auto_repair = var.enable_alpha ? false : true + auto_upgrade = var.enable_alpha ? false : true + } + + node_config { + preemptible = true + machine_type = "e2-standard-8" + resource_labels = local.standard_labels + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = var.nodepool_serviceaccount_email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/sqlservice.admin", + ] + } +} + + +locals { + # This is the recommended way to produce a kubeconfig file from + # the Google Cloud GKE terraform resource. 
+ kubeconfig = { + apiVersion = "v1" + kind = "Config" + preferences = { + colors = true + } + current-context = google_container_cluster.primary.name + contexts = [ + { + name = google_container_cluster.primary.name + context = { + cluster = google_container_cluster.primary.name + user = var.nodepool_serviceaccount_email + namespace = "default" + } + } + ] + clusters = [ + { + name = google_container_cluster.primary.name + cluster = { + server = "https://${google_container_cluster.primary.endpoint}" + certificate-authority-data = google_container_cluster.primary.master_auth[0].cluster_ca_certificate + } + } + ] + users = [ + { + name = var.nodepool_serviceaccount_email + user = { + exec = { + apiVersion = "client.authentication.k8s.io/v1beta1" + command = "gke-gcloud-auth-plugin" + installHint = "Install gke-gcloud-auth-plugin for use with kubectl by following https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke" + provideClusterInfo = true + } + } + } + ] + } + +} + +resource "local_file" "kubeconfig" { + content = yamlencode(local.kubeconfig) + filename = var.kubeconfig_path +} diff --git a/infra/resources/main.tf b/infra/resources/main.tf new file mode 100644 index 00000000..79c91729 --- /dev/null +++ b/infra/resources/main.tf @@ -0,0 +1,56 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_providers { + google = { + source = "hashicorp/google" + version = "4.48.0" + } + } +} + + +provider "google" { +} + +# Enable gcloud project APIs +locals { + standard_labels = { + e2e_test_resource = "true" + landscape = var.environment_name + } +} + +## +# This is how you do an output file containing terraform data for use by +# a subsequent script. + +# First, create the output data structure as a local variable +locals { + output_json = { + instance = google_sql_database_instance.instance.connection_name + db = google_sql_database.db.name + rootPassword = random_id.db_password.hex + kubeconfig = var.kubeconfig_path + } +} + +# Then write the output data to a local file in json format +resource "local_file" "testinfra" { + content = jsonencode(local.output_json) + filename = var.output_json_path +} diff --git a/infra/resources/vars.tf b/infra/resources/vars.tf new file mode 100644 index 00000000..f1a4c039 --- /dev/null +++ b/infra/resources/vars.tf @@ -0,0 +1,69 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "project_id" { + type = string + description = "The gcloud project id" +} + +variable "environment_name" { + type = string + description = "The test environment name" +} + +variable "kubeconfig_path" { + type = string + description = "The path to save the kubeconfig file" +} +variable "output_json_path" { + type = string + description = "The path to save test-infra.json file, input for e2e tests" +} +variable "gcloud_docker_url_file" { + type = string + description = "The path to save the artifact repo url" +} +variable "gcloud_bin" { + type = string + description = "The absolute path to the gcloud executable" +} + +variable "nodepool_serviceaccount_email" { + description = "The service account email address to assign to the nodepool" +} + +variable "kubernetes_version" { + default = "" +} + +variable "workers_count" { + default = "2" +} + +variable "node_machine_type" { + default = "e2-standard-2" +} + +variable "enable_alpha" { + default = false +} + +variable "gcloud_zone" { + default = "us-central1-c" +} +variable "gcloud_region" { + default = "us-central1" +} From 857444ac09b8c1c5c9c3536ed1cab7367f778015 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Mon, 23 Jan 2023 12:21:40 -0700 Subject: [PATCH 06/29] feat: add script to run terraform with input validation. (#182) Creates a new run.sh script to provision resources for the local development and for e2e build job. Call this script from the Makefile with appropriate values. --- Makefile | 41 ++++++++- infra/run.sh | 239 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 276 insertions(+), 4 deletions(-) create mode 100755 infra/run.sh diff --git a/Makefile b/Makefile index 31095425..8f652c07 100644 --- a/Makefile +++ b/Makefile @@ -119,7 +119,7 @@ go_fmt: # Automatically formats go files go run golang.org/x/tools/cmd/goimports@latest -w . 
yaml_fmt: # Automatically formats all yaml files - go run github.com/UltiRequiem/yamlfmt@latest -w $(shell find . -iname '*.yaml' -or -iname '*.yml') + go run github.com/UltiRequiem/yamlfmt@latest -w $(shell find . -iname '*.yaml' -or -iname '*.yml' | grep -v -e '^./bin/') .PHONY: add_copyright_header add_copyright_header: # Add the copyright header @@ -223,6 +223,10 @@ installer/install.sh: ## Build install shell script to deploy the operator # GKE cluster. KUBECONFIG_E2E ?= $(PWD)/bin/e2e-kubeconfig.yaml +# This is the file where Terraform will write the kubeconfig.yaml for the +# GKE cluster. +ENVIRONMENT_NAME ?= $(shell whoami) + # kubectl command with proper environment vars set E2E_KUBECTL_ENV = USE_GKE_E2E_AUTH_PLUGIN=True KUBECONFIG=$(KUBECONFIG_E2E) E2E_KUBECTL = $(E2E_KUBECTL_ENV) $(KUBECTL) @@ -254,13 +258,42 @@ e2e_test_clean: e2e_cleanup_test_namespaces e2e_undeploy ## Remove all operator .PHONY: e2e_teardown e2e_teardown: e2e_cluster_destroy ## Remove the test infrastructure for e2e tests from the Google Cloud Project +.PHONY: e2e_test_job +e2e_test_job: e2e_setup_job e2e_build_deploy e2e_test_run + +.PHONY: e2e_setup_job +e2e_setup_job: e2e_project e2e_cluster_job e2e_cert_manager_deploy + .PHONY: e2e_project e2e_project: gcloud # Check that the Google Cloud project exists @gcloud projects describe $(E2E_PROJECT_ID) 2>/dev/null || \ ( echo "No Google Cloud Project $(E2E_PROJECT_ID) found"; exit 1 ) +.PHONY: e2e_cluster_job +e2e_cluster_job: e2e_project terraform # Build infrastructure for e2e tests in the test job + PROJECT_DIR=$(PWD) \ + E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ + KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ + E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ + ENVIRONMENT_NAME=$(ENVIRONMENT_NAME) \ + NODEPOOL_SERVICEACCOUNT_EMAIL=$(NODEPOOL_SERVICEACCOUNT_EMAIL) \ + WORKLOAD_ID_SERVICEACCOUNT_EMAIL=$(WORKLOAD_ID_SERVICEACCOUNT_EMAIL) \ + TFSTATE_STORAGE_BUCKET=$(TFSTATE_STORAGE_BUCKET) \ + TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ + infra/run.sh 
apply_e2e_job + +.PHONY: e2e_cluster_new +e2e_cluster_new: e2e_project terraform # Build infrastructure for e2e tests + PROJECT_DIR=$(PWD) \ + E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ + KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ + E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ + ENVIRONMENT_NAME=$(ENVIRONMENT_NAME) \ + TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ + infra/run.sh apply + .PHONY: e2e_cluster -e2e_cluster: e2e_project terraform # Build infrastructure for e2e tests +e2e_cluster: e2e_project terraform # Build infrastructure for e2e tests (soon to be replaced with e2e_cluster_new implementation) PROJECT_DIR=$(PWD) \ E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ @@ -366,8 +399,8 @@ KUSTOMIZE_VERSION ?= v4.5.2 ENVTEST_VERSION ?= latest GOLANGCI_LINT_VERSION ?= latest -GOOS=$(shell go env GOOS | tr -d '\n') -GOARCH=$(shell go env GOARCH | tr -d '\n') +GOOS?=$(shell go env GOOS | tr -d '\n') +GOARCH?=$(shell go env GOARCH | tr -d '\n') remove_tools: rm -rf $(LOCALBIN)/* diff --git a/infra/run.sh b/infra/run.sh new file mode 100755 index 00000000..4a2c0047 --- /dev/null +++ b/infra/run.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +### +# run.sh is used by make to launch the terraform scripts as part of the +# end-to-end testing process. This is not intended to be a stand-alone shell +# script. 
+# +# Usage: +# $ run.sh +# +# This script will do these command: +# +# apply - Build an e2e test environment for the local developer to run e2e tests. +# This will run `terraform apply` on the ./permissions terraform project +# and then on ./resources project, passing the values between projects. +# +# destroy - Tear down the e2e test environment for the local developer. +# This runs `terraform destroy` on ./resources project, removing resources +# from the google cloud project used by the e2e tests. +# +# apply_e2e_job - Build an e2e test environment for the e2e CI jobs to use. +# CI jobs for e2e testing use pre-configured Google Cloud accounts +# that require a slightly different configuration than the e2e environment +# for local development. +# +# This script accepts inputs as environment variables: +# +# PROJECT_DIR - The directory containing the Makefile. +# ENVIRONMENT_NAME - The name of the e2e test environment to act upon. There +# may be many e2e test environments in the same Google Cloud project. +# E2E_PROJECT_ID - The Google Cloud project ID to act upon. +# KUBECONFIG_E2E - The output filename for the kubeconfig json file +# for the kubernetes cluster for the e2e environment. +# E2E_DOCKER_URL_FILE - The output filename for a text file containing the +# URL to the docker container registry for the e2e test environment. +# +# These additional environment variable are used by apply_e2e_job for E2E CI jobs: +# NODEPOOL_SERVICEACCOUNT_EMAIL - the name of the service account to assign +# to the kubernetes cluster node pool. +# WORKLOAD_ID_SERVICEACCOUNT_EMAIL - the name of the service account to use +# when testing workload identity. +# TFSTATE_STORAGE_BUCKET - the name of the Google Cloud Storage bucket to use +# to store the terraform state. 
+ +# Run terraform with appropriate settings +function run_tf() { + subproject=$1 + shift + output_json=$1 + shift + arr=("$@") + + tf_dir="$DATA_DIR/$subproject" + + "$TERRAFORM" -chdir="$tf_dir" init + + "$TERRAFORM" -chdir="$tf_dir" apply -parallelism=5 -auto-approve \ + -var "gcloud_bin=$(which gcloud)" -var "output_json_path=$output_json" "${arr[@]}" +} + +# Apply the terraform for local development e2e tests +function apply() { + run_tf permissions "$DATA_DIR/permissions_out.json" \ + -var "project_id=$E2E_PROJECT_ID" \ + -var "environment_name=$ENVIRONMENT_NAME" + + # Read nodepool_service_acount from the output of the permissions project + nodepool_serviceaccount_email=$(jq -r .nodepool_serviceaccount_email < "$DATA_DIR/permissions_out.json") + + run_tf resources "$TESTINFRA_JSON_FILE" \ + -var "gcloud_docker_url_file=$E2E_DOCKER_URL_FILE" \ + -var "project_id=$E2E_PROJECT_ID" \ + -var "kubeconfig_path=$KUBECONFIG_E2E" \ + -var "environment_name=$ENVIRONMENT_NAME" \ + -var "nodepool_serviceaccount_email=$nodepool_serviceaccount_email" + + gcloud auth configure-docker us-central1-docker.pkg.dev +} + +# Destroy the local development terraform resources +function destroy() { + nodepool_serviceaccount_email=$(jq -r .nodepool_serviceaccount_email < "$DATA_DIR/permissions_out.json") + run_tf resources TESTINFRA_JSON_FILE -destroy \ + -var "gcloud_docker_url_file=$E2E_DOCKER_URL_FILE" \ + -var "project_id=$E2E_PROJECT_ID" \ + -var "kubeconfig_path=$KUBECONFIG_E2E" \ + -var "environment_name=$ENVIRONMENT_NAME" \ + -var "nodepool_serviceaccount_email=$nodepool_serviceaccount_email" +} + +# Apply the terraform resources for the e2e test job +function apply_e2e_job() { + + #expects NODEPOOL_SERVICEACCOUNT_EMAIL to be set by the caller + if [[ -z "${NODEPOOL_SERVICEACCOUNT_EMAIL:-}" ]]; then + echo "expects NODEPOOL_SERVICEACCOUNT_EMAIL to be set the email address for the nodepool service account." 
+ exit 1 + fi + + #expects WORKLOAD_ID_SERVICEACCOUNT_EMAIL to be set by the caller + if [[ -z "${WORKLOAD_ID_SERVICEACCOUNT_EMAIL:-}" ]]; then + echo "expects WORKLOAD_ID_SERVICEACCOUNT_EMAIL to be set the email address for the workload id service account." + exit 1 + fi + + #expects TFSTATE_STORAGE_BUCKET to be set by the caller + if [[ -z "${TFSTATE_STORAGE_BUCKET:-}" ]]; then + echo "expects TFSTATE_STORAGE_BUCKET to be set the name of the cloud storage bucket where state is maintained." + exit 1 + fi + + # Use a remote backend for the state defined in the storage bucket, so that the + # state can be reused between runs + cat > $DATA_DIR/resources/backend.tf < /dev/null && pwd ) +TERRAFORM="$PROJECT_DIR/bin/terraform" +KUBECTL="$PROJECT_DIR/bin/kubectl" + +FAIL="" + +## +# Validate Script Inputs + +#expects $PROJECT_DIR +if [[ -z "$PROJECT_DIR" ]]; then + echo "expects PROJECT_DIR to be set to the root directory of the operator project." + FAIL=1 +fi + +#expects $ENVIRONMENT_NAME +if [[ -z "$ENVIRONMENT_NAME" ]]; then + echo "expects ENVIRONMENT_NAME to be set to the root directory of the operator project." + FAIL=1 +fi + +#expects $E2E_PROJECT_ID +if [[ -z "$E2E_PROJECT_ID" ]]; then + echo "expects E2E_PROJECT_ID to be set to the gcloud project id for testing." + FAIL=1 +fi + +#expects KUBECONFIG_E2E +if [[ -z "$KUBECONFIG_E2E" ]]; then + echo "expects KUBECONFIG_E2E to be set the location where kubeconfig should be written." + FAIL=1 +fi + +#expects $E2E_DOCKER_URL_FILE +if [[ -z "$E2E_DOCKER_URL_FILE" ]]; then + echo "expects E2E_DOCKER_URL_FILE to be set the location where docker url should be written." + FAIL=1 +fi + +#expects TESTINFRA_JSON_FILE +if [[ -z "$TESTINFRA_JSON_FILE" ]]; then + echo "expects TESTINFRA_JSON_FILE to be set the location where test infrastructure output file be written." 
+ FAIL=1 +fi + +ACTION="${1:-}" +shift + +case $ACTION in +"apply") + ;; +"destroy") + ;; +"apply_e2e_job") + ;; + *) + echo "Unknown action: $ACTION" + FAIL=1 + ;; +esac + +if [[ -n "$FAIL" ]] ; then + exit 1 +fi + + +set -euxo + +## +# Run the script + +cd "$SCRIPT_DIR" +DATA_DIR="$PROJECT_DIR/bin/tf-new" +mkdir -p "$DATA_DIR" +cp -r $SCRIPT_DIR/* "$DATA_DIR" + +case $ACTION in +"apply") + apply + ;; +"destroy") + destroy + ;; +"apply_e2e_job") + apply_e2e_job + ;; + *) + echo "Unknown action: $ACTION" + FAIL=1 + ;; +esac + From dc2990c4483d216a31a6cafbf45ebba6936b8c6a Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 24 Jan 2023 14:24:45 -0700 Subject: [PATCH 07/29] feat: add the e2e test job for Cloud Build (#184) The Cloud Build configuration and scripts needed to run end-to-end tests. In this change, the job works, but must be triggered manually. We will add automatic triggering for this job in future PRs. Related to: #65 --- .build/e2e_test.yaml | 45 +++++++++++++++++++ .github/header-checker-lint.yml | 1 + Makefile | 6 +++ infra/run.sh | 7 +++ tools/e2e_test_job.sh | 80 +++++++++++++++++++++++++++++++++ 5 files changed, 139 insertions(+) create mode 100644 .build/e2e_test.yaml create mode 100644 tools/e2e_test_job.sh diff --git a/.build/e2e_test.yaml b/.build/e2e_test.yaml new file mode 100644 index 00000000..153566dd --- /dev/null +++ b/.build/e2e_test.yaml @@ -0,0 +1,45 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Run this from your dev environment: +# gcloud builds submit --project=$E2E_TEST_PROJECT --config=.build/e2e_test.yaml --substitutions=_ENVIRONMENT_NAME=adhoc-$USER,_TEST_BUILD_ID=$(tools/build-identifier.sh) +# +steps: + - name: 'gcr.io/cloud-builders/docker' + entrypoint: bash + args: + - ./tools/e2e_test_job.sh + id: 'run_e2e' + secretEnv: ['NODEPOOL_SERVICEACCOUNT_EMAIL', 'WORKLOAD_ID_SERVICEACCOUNT_EMAIL', 'TFSTATE_STORAGE_BUCKET'] +options: + machineType: 'E2_HIGHCPU_8' + env: + - "ENVIRONMENT_NAME=$_ENVIRONMENT_NAME" + - "RELEASE_TEST_BUILD_ID=$_TEST_BUILD_ID" + - "GOOS=linux" + - "GOARCH=amd64" +substitutions: + # _ENVIRONMENT_NAME the name of the test landscape to use. Multiple landscapes may + # be created in the same project + _ENVIRONMENT_NAME: "" + _TEST_BUILD_ID: "" +availableSecrets: + secretManager: + - versionName: projects/cloud-sql-operator-testing/secrets/NODEPOOL_SERVICEACCOUNT_EMAIL/versions/1 + env: 'NODEPOOL_SERVICEACCOUNT_EMAIL' + - versionName: projects/cloud-sql-operator-testing/secrets/WORKLOAD_ID_SERVICEACCOUNT_EMAIL/versions/1 + env: 'WORKLOAD_ID_SERVICEACCOUNT_EMAIL' + - versionName: projects/cloud-sql-operator-testing/secrets/TFSTATE_STORAGE_BUCKET/versions/1 + env: 'TFSTATE_STORAGE_BUCKET' diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index 68ddca6f..a7e78958 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -21,3 +21,4 @@ sourceFileExtensions: - 'yaml' - 'yml' - 'tf' + - 'sh' diff --git a/Makefile b/Makefile index 8f652c07..f214e244 100644 --- a/Makefile +++ b/Makefile @@ -83,21 +83,27 @@ help: ## Display this help. 
.PHONY: install_tools install_tools: remove_tools all_tools ## Installs all development tools + @echo "TIME: $(shell date) end install tools" .PHONY: generate generate: ctrl_generate ctrl_manifests go_lint tf_lint installer reset_image add_copyright_header go_fmt yaml_fmt ## Runs code generation, format, and validation tools + @echo "TIME: $(shell date) end make generate" .PHONY: build build: generate build_push_docker ## Builds and pushes the docker image to tag defined in envvar IMG + @echo "TIME: $(shell date) end make build" .PHONY: test test: generate go_test ## Run tests (but not internal/teste2e) + @echo "TIME: $(shell date) end make test" .PHONY: deploy deploy: build deploy_with_kubeconfig ## Deploys the operator to the kubernetes cluster using envvar KUBECONFIG. Set $IMG envvar to the image tag. + @echo "TIME: $(shell date) end make deploy" .PHONY: e2e_test e2e_test: e2e_setup e2e_build_deploy e2e_test_run e2e_test_clean ## Run end-to-end tests on Google Cloud GKE + @echo "TIME: $(shell date) end make e2e_test" ## # Development targets diff --git a/infra/run.sh b/infra/run.sh index 4a2c0047..a12767c5 100755 --- a/infra/run.sh +++ b/infra/run.sh @@ -221,6 +221,8 @@ DATA_DIR="$PROJECT_DIR/bin/tf-new" mkdir -p "$DATA_DIR" cp -r $SCRIPT_DIR/* "$DATA_DIR" +echo "TIME: $(date) Start terraform reconcile action $ACTION" + case $ACTION in "apply") apply @@ -237,3 +239,8 @@ case $ACTION in ;; esac +echo "TIME: $(date) End terraform reconcile action $ACTION" + +if [[ -n "$FAIL" ]] ; then + exit 1 +fi diff --git a/tools/e2e_test_job.sh b/tools/e2e_test_job.sh new file mode 100644 index 00000000..9685894c --- /dev/null +++ b/tools/e2e_test_job.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +### +# This script is used by the E2E test job defined in .build/e2e_test.yaml +# to prepare the Cloud Build environment and run the end-to-end tests. +# +echo "TIME: $(date) Begin Script" +set -euxo + +E2E_PROJECT_ID=cloud-sql-operator-testing + +echo "TIME: $(date) Install GCC" +# Install GCC and other essential build tools +apt-get update +apt-get install -y zip unzip build-essential + + +# Install and configure GCloud CLI +echo "TIME: $(date) Install GCloud CLI" +mkdir -p bin +curl -L -o bin/gcloud-cli.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-413.0.0-linux-x86_64.tar.gz +( cd bin && tar -zxf gcloud-cli.tar.gz ) +./bin/google-cloud-sdk/bin/gcloud config set project "$E2E_PROJECT_ID" +./bin/google-cloud-sdk/bin/gcloud config set compute/zone "us-central1" +export PATH=$PATH:$PWD/bin/google-cloud-sdk/bin +which gcloud +gcloud components install --quiet gke-gcloud-auth-plugin + +# Install helm +echo "TIME: $(date) Install Helm" +curl -L -o bin/helm.tar.gz https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz +( cd bin && tar -zxf helm.tar.gz && ls -al) +export PATH=$PATH:$PWD/bin/linux-amd64 +which helm + +# Install go +echo "TIME: $(date) Install Go" +curl -L -o bin/go.tar.gz https://go.dev/dl/go1.18.10.linux-amd64.tar.gz +rm -rf /usr/local/go && tar -C /usr/local -xzf bin/go.tar.gz +export PATH=$PATH:/usr/local/go/bin +go version + +# Set the e2e test project id and other params from +# the Cloud Build environment +echo "TIME: $(date) Configure Make Env" + +cat > build.env < Date: Wed, 25 Jan 2023 14:33:15 -0700 
Subject: [PATCH 08/29] chore: update CODEOWNERS (#185) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d2262b40..650719e6 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @GoogleCloudPlatform/infra-db-dpes +* @GoogleCloudPlatform/infra-db-sdk From be9b5a9a5be963cde055d1b07c841b0cc21733c8 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Fri, 27 Jan 2023 13:41:32 -0700 Subject: [PATCH 09/29] chore: clean up dead terraform code from testinfra (#189) Removes old terraform e2e provisioning code that has been replaced by the refactored new terraform code in infra/* Related to #65 Fixes #65 --- Makefile | 19 ++--- testinfra/.gitignore | 6 -- testinfra/artifacts.tf | 33 --------- testinfra/database.tf | 52 ------------- testinfra/gke_cluster.tf | 154 --------------------------------------- testinfra/main.tf | 76 ------------------- testinfra/run.sh | 71 ------------------ testinfra/vars.tf | 60 --------------- 8 files changed, 7 insertions(+), 464 deletions(-) delete mode 100644 testinfra/.gitignore delete mode 100644 testinfra/artifacts.tf delete mode 100644 testinfra/database.tf delete mode 100644 testinfra/gke_cluster.tf delete mode 100644 testinfra/main.tf delete mode 100755 testinfra/run.sh delete mode 100644 testinfra/vars.tf diff --git a/Makefile b/Makefile index f214e244..c36de67f 100644 --- a/Makefile +++ b/Makefile @@ -155,7 +155,8 @@ go_lint: golangci-lint # Run go lint tools, fail if unchecked errors .PHONY: tf_lint tf_lint: terraform # Run terraform fmt to ensure terraform code is consistent - $(TERRAFORM) -chdir=testinfra fmt + $(TERRAFORM) -chdir=infra/permissions fmt + $(TERRAFORM) -chdir=infra/resources fmt .PHONY: go_test go_test: ctrl_manifests envtest # Run tests (but not internal/teste2e) @@ -288,8 +289,8 @@ e2e_cluster_job: e2e_project terraform # Build infrastructure for e2e tests in t 
TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ infra/run.sh apply_e2e_job -.PHONY: e2e_cluster_new -e2e_cluster_new: e2e_project terraform # Build infrastructure for e2e tests +.PHONY: e2e_cluster +e2e_cluster: e2e_project terraform # Build infrastructure for e2e tests PROJECT_DIR=$(PWD) \ E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ @@ -298,21 +299,15 @@ e2e_cluster_new: e2e_project terraform # Build infrastructure for e2e tests TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ infra/run.sh apply -.PHONY: e2e_cluster -e2e_cluster: e2e_project terraform # Build infrastructure for e2e tests (soon to be replaced with e2e_cluster_new implementation) - PROJECT_DIR=$(PWD) \ - E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ - KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ - E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ - testinfra/run.sh apply - .PHONY: e2e_cluster_destroy e2e_cluster_destroy: e2e_project terraform # Destroy the infrastructure for e2e tests PROJECT_DIR=$(PWD) \ E2E_PROJECT_ID=$(E2E_PROJECT_ID) \ KUBECONFIG_E2E=$(KUBECONFIG_E2E) \ E2E_DOCKER_URL_FILE=$(E2E_DOCKER_URL_FILE) \ - testinfra/run.sh destroy + ENVIRONMENT_NAME=$(ENVIRONMENT_NAME) \ + TESTINFRA_JSON_FILE=$(LOCALBIN)/testinfra.json \ + infra/run.sh destroy .PHONY: e2e_cert_manager_deploy e2e_cert_manager_deploy: e2e_project helm # Deploy the certificate manager diff --git a/testinfra/.gitignore b/testinfra/.gitignore deleted file mode 100644 index 4b4ceac5..00000000 --- a/testinfra/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.terraform -terraform.tfstate -terraform.tfstate.backup -.terraform.lock* -secrets.tfvars -.terraform.tfstate.lock.info diff --git a/testinfra/artifacts.tf b/testinfra/artifacts.tf deleted file mode 100644 index 2fbd94db..00000000 --- a/testinfra/artifacts.tf +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -resource "google_artifact_registry_repository" "artifact_repo" { - location = var.gcloud_region - repository_id = "test${random_id.cluster_name.hex}" - description = "Operator test artifact repo" - format = "DOCKER" - project = var.project_id -} - -// us-central1-docker.pkg.dev/csql-operator-test/test76e6d646e2caac1c458c -resource "local_file" "artifact_repo_url" { - # content = "${google_artifact_registry_repository.artifact_repo.location}-docker.pkg.dev/${google_artifact_registry_repository.artifact_repo.project}/${google_artifact_registry_repository.artifact_repo.name}" - content = join("/", [ - "${google_artifact_registry_repository.artifact_repo.location}-docker.pkg.dev", - google_artifact_registry_repository.artifact_repo.project, - google_artifact_registry_repository.artifact_repo.name]) - filename = var.gcloud_docker_url_file -} diff --git a/testinfra/database.tf b/testinfra/database.tf deleted file mode 100644 index 9c70ba4b..00000000 --- a/testinfra/database.tf +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -resource "random_id" "db_name" { - byte_length = 10 -} - -resource "random_id" "db_password" { - byte_length = 10 -} - -# See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version -resource "google_sql_database_instance" "instance" { - name = "inst${random_id.db_name.hex}" - project = var.project_id - region = var.gcloud_region - database_version = "POSTGRES_13" - settings { - tier = "db-f1-micro" - } - deletion_protection = "true" - root_password = random_id.db_password.hex -} - -resource "google_sql_database" "db" { - name = "db" - instance = google_sql_database_instance.instance.name - project = var.project_id -} - -output "db_root_password" { - value = random_id.db_password.hex -} -output "db_instance_name" { - value = google_sql_database_instance.instance.name -} -output "db_database_name" { - value = google_sql_database.db.name -} diff --git a/testinfra/gke_cluster.tf b/testinfra/gke_cluster.tf deleted file mode 100644 index 2b63e59b..00000000 --- a/testinfra/gke_cluster.tf +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -# From https://github.com/hashicorp/terraform-provider-kubernetes/blob/main/kubernetes/test-infra/gke/main.tf - -data "google_client_config" "default" { -} - -data "google_container_engine_versions" "supported" { - location = var.gcloud_zone - version_prefix = var.kubernetes_version - project = var.project_id -} - -resource "random_id" "cluster_name" { - byte_length = 10 -} - -resource "google_service_account" "node_pool" { - account_id = "k8s-nodes-${random_id.cluster_name.hex}" - display_name = "Kubernetes provider SA" - project = var.project_id -} - -resource "google_project_iam_binding" "cloud_sql_client" { - project = var.project_id - role = "roles/cloudsql.client" - members = [ - "serviceAccount:${google_service_account.node_pool.email}" - ] -} -resource "google_project_iam_member" "allow_image_pull" { - project = var.project_id - role = "roles/artifactregistry.reader" - member = "serviceAccount:${google_service_account.node_pool.email}" -} - -resource "google_container_cluster" "primary" { - project = var.project_id - name = "operator-test-${random_id.cluster_name.hex}" - location = var.gcloud_zone - min_master_version = data.google_container_engine_versions.supported.latest_master_version - initial_node_count = 2 - - // Alpha features are disabled by default and can be enabled by GKE for a particular GKE control plane version. - // Creating an alpha cluster enables all alpha features by default. - // Ref: https://cloud.google.com/kubernetes-engine/docs/concepts/feature-gates - enable_kubernetes_alpha = var.enable_alpha - - // disalbe the default nodepool and specify node pools as - // separate terraform resources. 
This way if we - // change the nodepool config, we don't delete the cluster too - remove_default_node_pool = true - -} - -resource "google_container_node_pool" "primary_preemptible_nodes" { - name = "operator-test-nodes-${random_id.cluster_name.hex}" - cluster = google_container_cluster.primary.id - initial_node_count = var.workers_count - version = data.google_container_engine_versions.supported.latest_node_version - location = var.gcloud_zone - - autoscaling { - max_node_count = 10 - min_node_count = 2 - } - - management { - auto_repair = var.enable_alpha ? false : true - auto_upgrade = var.enable_alpha ? false : true - } - - node_config { - preemptible = true - machine_type = "e2-standard-8" - - # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. - service_account = google_service_account.node_pool.email - oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/sqlservice.admin", - ] - } -} - - -locals { - # This is the recommended way to produce a kubeconfig file from - # the Google Cloud GKE terraform resource. 
- kubeconfig = { - apiVersion = "v1" - kind = "Config" - preferences = { - colors = true - } - current-context = google_container_cluster.primary.name - contexts = [ - { - name = google_container_cluster.primary.name - context = { - cluster = google_container_cluster.primary.name - user = google_service_account.node_pool.email - namespace = "default" - } - } - ] - clusters = [ - { - name = google_container_cluster.primary.name - cluster = { - server = "https://${google_container_cluster.primary.endpoint}" - certificate-authority-data = google_container_cluster.primary.master_auth[0].cluster_ca_certificate - } - } - ] - users = [ - { - name = google_service_account.node_pool.email - user = { - exec = { - apiVersion = "client.authentication.k8s.io/v1beta1" - command = "gke-gcloud-auth-plugin" - installHint = "Install gke-gcloud-auth-plugin for use with kubectl by following https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke" - provideClusterInfo = true - } - } - } - ] - } - -} - -resource "local_file" "kubeconfig" { - content = yamlencode(local.kubeconfig) - filename = var.kubeconfig_path -} diff --git a/testinfra/main.tf b/testinfra/main.tf deleted file mode 100644 index 58dcafa2..00000000 --- a/testinfra/main.tf +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -terraform { - required_providers { - google = { - source = "hashicorp/google" - version = "4.49.0" - } - } -} - - -provider "google" { -} - -# Enable gcloud project APIs -locals { - project_services = toset([ - "compute.googleapis.com", - "container.googleapis.com", # GKE - "artifactregistry.googleapis.com", - "deploymentmanager.googleapis.com", - "dns.googleapis.com", - "logging.googleapis.com", - "monitoring.googleapis.com", - "oslogin.googleapis.com", - "pubsub.googleapis.com", - "replicapool.googleapis.com", - "replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "servicemanagement.googleapis.com", - "sql-component.googleapis.com", - "sqladmin.googleapis.com", - "storage-api.googleapis.com"]) -} - -resource "google_project_service" "project" { - for_each = local.project_services - project = var.project_id - service = each.value -} - - -## -# This is how you do an output file containing terraform data for use by -# a subsequent script. - -# First, create the output data structure as a local variable -locals { - testinfra = { - instance = google_sql_database_instance.instance.connection_name - db = google_sql_database.db.name - rootPassword = random_id.db_password.hex - kubeconfig = var.kubeconfig_path - } -} - -# Then write the output data to a local file in json format -resource "local_file" "testinfra" { - content = jsonencode(local.testinfra) - filename = var.testinfra_json_path -} diff --git a/testinfra/run.sh b/testinfra/run.sh deleted file mode 100755 index a30f5887..00000000 --- a/testinfra/run.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2022 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -#expects $PROJECT_DIR -if [[ -z "$PROJECT_DIR" ]]; then - echo "expects PROJECT_DIR to be set to the root directory of the operator project." - exit 1 -fi - -#expects $E2E_PROJECT_ID -if [[ -z "$E2E_PROJECT_ID" ]]; then - echo "expects E2E_PROJECT_ID to be set to the gcloud project id for testing." - exit 1 -fi - -#expects KUBECONFIG_E2E -if [[ -z "KUBECONFIG_E2E" ]]; then - echo "expects KUBECONFIG_E2E to be set the location where kubeconfig should be written." - exit 1 -fi - -#expects $E2E_DOCKER_URL_FILE -if [[ -z "$E2E_DOCKER_URL_FILE" ]]; then - echo "expects E2E_DOCKER_URL_FILE to be set the location where docker url should be written." 
- exit 1 -fi - -if [[ "${1:-}" == "destroy" ]] ; then - DESTROY="-destroy" -else - DESTROY="" -fi - - -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - -TERRAFORM="$PROJECT_DIR/bin/terraform" -KUBECTL="$PROJECT_DIR/bin/kubectl" - -set -euxo - -# Begin terraform setup - -cd "$SCRIPT_DIR" -DATA_DIR="$SCRIPT_DIR/../bin/tf" -mkdir -p "$DATA_DIR" -cp -r $SCRIPT_DIR/* "$DATA_DIR" - -"$TERRAFORM" -chdir="$DATA_DIR" init - -"$TERRAFORM" -chdir="$DATA_DIR" apply $DESTROY -parallelism=5 -auto-approve \ - -var "gcloud_bin=$(which gcloud)" \ - -var "gcloud_docker_url_file=$E2E_DOCKER_URL_FILE" \ - -var "project_id=$E2E_PROJECT_ID" \ - -var "kubeconfig_path=$KUBECONFIG_E2E" \ - -var "testinfra_json_path=$PROJECT_DIR/bin/testinfra.json" - -gcloud auth configure-docker us-central1-docker.pkg.dev diff --git a/testinfra/vars.tf b/testinfra/vars.tf deleted file mode 100644 index 3eb68f98..00000000 --- a/testinfra/vars.tf +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -variable "project_id" { - type = string - description = "The gcloud project id" -} - -variable "kubeconfig_path" { - type = string - description = "The path to save the kubeconfig file" -} -variable "testinfra_json_path" { - type = string - description = "The path to save test-infra.json file, input for e2e tests" -} -variable "gcloud_docker_url_file" { - type = string - description = "The path to save the artifact repo url" -} -variable "gcloud_bin" { - type = string - description = "The absolute path to the gcloud executable" -} - -variable "kubernetes_version" { - default = "" -} - -variable "workers_count" { - default = "2" -} - -variable "node_machine_type" { - default = "e2-standard-2" -} - -variable "enable_alpha" { - default = false -} - -variable "gcloud_zone" { - default = "us-central1-c" -} -variable "gcloud_region" { - default = "us-central1" -} From ef1a26f36b00441ea63e8bebe5b97dd42a896794 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Mon, 30 Jan 2023 11:38:24 -0700 Subject: [PATCH 10/29] chore: update with latest controller-gen (#193) controller-gen was updated to 0.11.2 --- .../crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml | 2 +- installer/cloud-sql-proxy-operator.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index ea59b4f9..38ab9154 100644 --- a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -15,7 +15,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 + controller-gen.kubebuilder.io/version: v0.11.2 creationTimestamp: null name: authproxyworkloads.cloudsql.cloud.google.com spec: diff --git 
a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index a6aa0bf0..8293701e 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -24,7 +24,7 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: cloud-sql-proxy-operator-system/cloud-sql-proxy-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.11.1 + controller-gen.kubebuilder.io/version: v0.11.2 name: authproxyworkloads.cloudsql.cloud.google.com spec: conversion: From f393a0bdae05c2f8b4c22f6f8af56de0d44a83cd Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Mon, 30 Jan 2023 13:03:49 -0700 Subject: [PATCH 11/29] test: E2E database connection test for postgres (#192) Adds an e2e test that actually connects to the postgres database and executes a query. Related-to: #52 --- docs/examples/deployment-postgres-tcp.yaml | 5 - internal/testhelpers/resources.go | 166 +++++++++++++++++++-- internal/testhelpers/testcases.go | 2 + tests/e2e_test.go | 86 ++++++++++- tests/setup_test.go | 2 + 5 files changed, 244 insertions(+), 17 deletions(-) diff --git a/docs/examples/deployment-postgres-tcp.yaml b/docs/examples/deployment-postgres-tcp.yaml index 3c86a0d7..7e3ab159 100644 --- a/docs/examples/deployment-postgres-tcp.yaml +++ b/docs/examples/deployment-postgres-tcp.yaml @@ -98,11 +98,6 @@ spec: secretKeyRef: name: gke-cloud-sql-operator-demo key: DB_USER - - name: DB_USER - valueFrom: - secretKeyRef: - name: gke-cloud-sql-operator-demo - key: DB_USER - name: PGPASSWORD # The env name PGPASSWORD is specific to the psql command. 
valueFrom: secretKeyRef: diff --git a/internal/testhelpers/resources.go b/internal/testhelpers/resources.go index 36e6759d..1c59366d 100644 --- a/internal/testhelpers/resources.go +++ b/internal/testhelpers/resources.go @@ -32,11 +32,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) -func buildPodTemplateSpec(mainPodSleep int) corev1.PodTemplateSpec { +func buildPodTemplateSpec(mainPodSleep int, appLabel string) corev1.PodTemplateSpec { podCmd := fmt.Sprintf("echo Container 1 is Running ; sleep %d", mainPodSleep) return corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "busyboxon"}, + Labels: map[string]string{"app": appLabel}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -49,6 +49,90 @@ func buildPodTemplateSpec(mainPodSleep int) corev1.PodTemplateSpec { } } +// BuildSecret creates a Secret object containing database information to be used +// by the pod to connect to the database. +func BuildSecret(secretName, userKey, user, passwordKey, password, dbNameKey, dbName string) corev1.Secret { + return corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + }, + Type: "Opaque", + Data: map[string][]byte{ + userKey: []byte(user), + passwordKey: []byte(password), + dbNameKey: []byte(dbName), + }, + } +} + +// BuildPgPodSpec creates a podspec specific to Postgres databases that will connect +// and run a trivial query. It also configures the pod's Liveness probe so that +// the pod's `Ready` condition is `Ready` when the database can connect. 
+func BuildPgPodSpec(mainPodSleep int, appLabel, secretName, userKey, passwordKey, dbNameKey string) corev1.PodTemplateSpec { + podCmd := fmt.Sprintf(`echo Container 1 is Running +sleep 10 +psql --host=$DB_HOST --port=$DB_PORT --username=$DB_USER '--command=select 1' --echo-queries --dbname=$DB_NAME +sleep %d`, mainPodSleep) + + livenessCmd := "psql --host=$DB_HOST --port=$DB_PORT --username=$DB_USER '--command=select 1' --echo-queries --dbname=$DB_NAME" + + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": appLabel}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "db-client-app", + Image: "postgres", + ImagePullPolicy: "IfNotPresent", + Command: []string{"/bin/sh", "-e", "-x", "-c", podCmd}, + LivenessProbe: &corev1.Probe{InitialDelaySeconds: 60, PeriodSeconds: 30, FailureThreshold: 3, + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "-e", livenessCmd}, + }, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "DB_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: userKey, + }, + }, + }, + { + Name: "PGPASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: passwordKey, + }, + }, + }, + { + Name: "DB_NAME", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: dbNameKey, + }, + }, + }, + }, + }}, + }, + } +} + +// BuildDeployment creates a StatefulSet object with a default pod template +// that will sleep for 1 hour. 
func BuildDeployment(name types.NamespacedName, appLabel string) *appsv1.Deployment { var two int32 = 2 return &appsv1.Deployment{ @@ -62,13 +146,15 @@ func BuildDeployment(name types.NamespacedName, appLabel string) *appsv1.Deploym Replicas: &two, Strategy: appsv1.DeploymentStrategy{Type: "RollingUpdate"}, Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "busyboxon"}, + MatchLabels: map[string]string{"app": appLabel}, }, - Template: buildPodTemplateSpec(3600), + Template: buildPodTemplateSpec(3600, appLabel), }, } } +// BuildStatefulSet creates a StatefulSet object with a default pod template +// that will sleep for 1 hour. func BuildStatefulSet(name types.NamespacedName, appLabel string) *appsv1.StatefulSet { var two int32 = 2 return &appsv1.StatefulSet{ @@ -82,13 +168,15 @@ func BuildStatefulSet(name types.NamespacedName, appLabel string) *appsv1.Statef Replicas: &two, UpdateStrategy: appsv1.StatefulSetUpdateStrategy{Type: "RollingUpdate"}, Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "busyboxon"}, + MatchLabels: map[string]string{"app": appLabel}, }, - Template: buildPodTemplateSpec(3600), + Template: buildPodTemplateSpec(3600, appLabel), }, } } +// BuildDaemonSet creates a DaemonSet object with a default pod template +// that will sleep for 1 hour. 
func BuildDaemonSet(name types.NamespacedName, appLabel string) *appsv1.DaemonSet { return &appsv1.DaemonSet{ TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}, @@ -100,13 +188,15 @@ func BuildDaemonSet(name types.NamespacedName, appLabel string) *appsv1.DaemonSe Spec: appsv1.DaemonSetSpec{ UpdateStrategy: appsv1.DaemonSetUpdateStrategy{Type: "RollingUpdate"}, Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "busyboxon"}, + MatchLabels: map[string]string{"app": appLabel}, }, - Template: buildPodTemplateSpec(3600), + Template: buildPodTemplateSpec(3600, appLabel), }, } } +// BuildJob creates a Job object with a default pod template +// that will sleep for 30 seconds. func BuildJob(name types.NamespacedName, appLabel string) *batchv1.Job { job := &batchv1.Job{ TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, @@ -116,7 +206,7 @@ func BuildJob(name types.NamespacedName, appLabel string) *batchv1.Job { Labels: map[string]string{"app": appLabel}, }, Spec: batchv1.JobSpec{ - Template: buildPodTemplateSpec(30), + Template: buildPodTemplateSpec(30, appLabel), Parallelism: ptr(int32(1)), // run the pod 20 times, 1 at a time Completions: ptr(int32(20)), }, @@ -129,6 +219,8 @@ func ptr[T any](i T) *T { return &i } +// BuildCronJob creates CronJob object with a default pod template +// that will sleep for 60 seconds. 
func BuildCronJob(name types.NamespacedName, appLabel string) *batchv1.CronJob { job := &batchv1.CronJob{ TypeMeta: metav1.TypeMeta{Kind: "CronJob", APIVersion: "batch/v1"}, @@ -141,7 +233,7 @@ func BuildCronJob(name types.NamespacedName, appLabel string) *batchv1.CronJob { Schedule: "* * * * *", JobTemplate: batchv1.JobTemplateSpec{ Spec: batchv1.JobSpec{ - Template: buildPodTemplateSpec(60), + Template: buildPodTemplateSpec(60, appLabel), }, }, }, @@ -151,6 +243,8 @@ func BuildCronJob(name types.NamespacedName, appLabel string) *batchv1.CronJob { } +// CreateWorkload Creates the workload in Kubernetes, waiting to confirm that +// the workload exists. func (cc *TestCaseClient) CreateWorkload(ctx context.Context, o client.Object) error { err := cc.Client.Create(ctx, o) if err != nil { @@ -271,6 +365,53 @@ func (cc *TestCaseClient) ExpectPodContainerCount(ctx context.Context, podSelect return nil } +// ExpectPodReady finds a deployment and keeps checking until the number of +// containers on the deployment's PodSpec.Containers == count. Returns error after 30 seconds +// if the containers do not match. 
+func (cc *TestCaseClient) ExpectPodReady(ctx context.Context, podSelector *metav1.LabelSelector, allOrAny string) error { + + var ( + countBadPods int + countPods int + ) + + return RetryUntilSuccess(24, DefaultRetryInterval, func() error { + countBadPods = 0 + pods, err := ListPods(ctx, cc.Client, cc.Namespace, podSelector) + if err != nil { + return err + } + countPods = len(pods.Items) + if len(pods.Items) == 0 { + return fmt.Errorf("got 0 pods, want at least 1 pod") + } + for _, pod := range pods.Items { + if !isPodReady(pod) { + countBadPods++ + } + } + switch { + case allOrAny == "all" && countBadPods > 0: + return fmt.Errorf("got %d pods not ready of %d pods, want 0 pods not ready", countBadPods, len(pods.Items)) + case allOrAny == "any" && countBadPods == countPods: + return fmt.Errorf("got %d pods not ready of %d pods, want at least 1 pod ready ", countBadPods, len(pods.Items)) + default: + return nil + } + }) + +} + +func isPodReady(pod corev1.Pod) bool { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && + condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + // ExpectContainerCount finds a deployment and keeps checking until the number of // containers on the deployment's PodSpec.Containers == count. Returns error after 30 seconds // if the containers do not match. @@ -375,6 +516,9 @@ func (cc *TestCaseClient) CreateDeploymentReplicaSetAndPods(ctx context.Context, } return rs, pods, nil } + +// BuildAuthProxyWorkload creates an AuthProxyWorkload object with a +// single connection instance. 
func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) *v1alpha1.AuthProxyWorkload { return &v1alpha1.AuthProxyWorkload{ TypeMeta: metav1.TypeMeta{ @@ -388,6 +532,8 @@ func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) * Spec: v1alpha1.AuthProxyWorkloadSpec{ Instances: []v1alpha1.InstanceSpec{{ ConnectionString: connectionString, + HostEnvName: "DB_HOST", + PortEnvName: "DB_PORT", }}, }, } diff --git a/internal/testhelpers/testcases.go b/internal/testhelpers/testcases.go index f3dab342..992e5c89 100644 --- a/internal/testhelpers/testcases.go +++ b/internal/testhelpers/testcases.go @@ -31,6 +31,8 @@ type TestCaseClient struct { Namespace string ConnectionString string ProxyImageURL string + DBRootPassword string + DBName string } func NewNamespaceName(prefix string) string { diff --git a/tests/e2e_test.go b/tests/e2e_test.go index b533e614..8bde9037 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -147,7 +147,7 @@ func TestProxyAppliedOnNewWorkload(t *testing.T) { t.Fatal("unable to create ", kind, err) } selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "busyboxon"}, + MatchLabels: map[string]string{"app": appLabel}, } t.Log("Checking for container counts", kind) err = tp.ExpectPodContainerCount(ctx, selector, 2, test.allOrAny) @@ -234,7 +234,7 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { t.Fatal("unable to create ", kind, err) } selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "busyboxon"}, + MatchLabels: map[string]string{"app": appLabel}, } err = tp.ExpectPodContainerCount(ctx, selector, 1, test.allOrAny) @@ -278,3 +278,85 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { }) } } + +func TestPostgresConnection(t *testing.T) { + // When running tests during development, set the SKIP_CLEANUP=true envvar so that + // the test namespace remains after the test ends. 
By default, the test + // namespace will be deleted when the test exits. + skipCleanup := loadValue("SKIP_CLEANUP", "", "false") == "true" + + ctx := testContext() + + tp := newTestCaseClient("pgconnection") + + err := tp.CreateOrPatchNamespace(ctx) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if skipCleanup { + return + } + + err = tp.DeleteNamespace(ctx) + if err != nil { + t.Fatal(err) + } + }) + + const ( + pwlName = "newss" + appLabel = "pgsql" + kind = "Deployment" + ) + key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} + + s := testhelpers.BuildSecret("db-secret", + "DB_USER", "postgres", + "DB_PASS", tp.DBRootPassword, + "DB_NAME", tp.DBName) + s.SetNamespace(tp.Namespace) + err = tp.Client.Create(ctx, &s) + if err != nil { + t.Fatal(err) + } + + wl := &workload.DeploymentWorkload{Deployment: testhelpers.BuildDeployment(types.NamespacedName{}, appLabel)} + wl.Deployment.Spec.Template = testhelpers.BuildPgPodSpec(600, + appLabel, "db-secret", "DB_USER", "DB_PASS", "DB_NAME") + t.Log("Creating AuthProxyWorkload") + + err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + if err != nil { + t.Fatal(err) + } + + t.Log("Waiting for AuthProxyWorkload operator to begin the reconcile loop") + _, err = tp.GetAuthProxyWorkloadAfterReconcile(ctx, key) + if err != nil { + t.Fatal("unable to create AuthProxyWorkload", err) + } + + t.Log("Creating ", kind) + wl.Object().SetNamespace(tp.Namespace) + wl.Object().SetName(pwlName) + err = tp.CreateWorkload(ctx, wl.Object()) + if err != nil { + t.Fatal("unable to create ", kind, err) + } + selector := &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": appLabel}, + } + t.Log("Checking for container counts", kind) + err = tp.ExpectPodContainerCount(ctx, selector, 2, "all") + if err != nil { + t.Error(err) + } + t.Log("Checking for ready", kind) + err = tp.ExpectPodReady(ctx, selector, "all") + if err != nil { + t.Error(err) + } + + t.Log("Done, OK", kind) +} 
diff --git a/tests/setup_test.go b/tests/setup_test.go index ed43a87e..cd2a06ab 100644 --- a/tests/setup_test.go +++ b/tests/setup_test.go @@ -58,6 +58,8 @@ func newTestCaseClient(ns string) *testhelpers.TestCaseClient { Client: c, Namespace: testhelpers.NewNamespaceName(ns), ConnectionString: infra.InstanceConnectionString, + DBRootPassword: infra.RootPassword, + DBName: infra.DB, ProxyImageURL: proxyImageURL, } } From 26377f1898ec0c2e13d69920bc097cfdc0c07180 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 31 Jan 2023 13:32:31 -0700 Subject: [PATCH 12/29] doc: add examples to connect to MySQL and MS SQL Server (#195) Adds additional doc examples demonstrating how to connect to MySQL and MS SQL Server using the Cloud SQL Proxy Operator. This is needed for the quickstart guides that are being published this week. --- docs/examples/deployment-mssql-tcp.yaml | 110 ++++++++++++++++++++++++ docs/examples/deployment-mysql-tcp.yaml | 110 ++++++++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100644 docs/examples/deployment-mssql-tcp.yaml create mode 100644 docs/examples/deployment-mysql-tcp.yaml diff --git a/docs/examples/deployment-mssql-tcp.yaml b/docs/examples/deployment-mssql-tcp.yaml new file mode 100644 index 00000000..639ce1dd --- /dev/null +++ b/docs/examples/deployment-mssql-tcp.yaml @@ -0,0 +1,110 @@ +# Copyright 2023 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +### +# This example demonstrates how to use environment variables set by the +# Cloud SQL Proxy Operator to connect to your database. + +## +# Create an AuthProxyWorkload to hold the configuration for your +# Cloud SQL Proxy containers. + +apiVersion: cloudsql.cloud.google.com/v1alpha1 +kind: AuthProxyWorkload +metadata: + name: authproxyworkload-sample +spec: + workloadSelector: + kind: "Deployment" # Applies to a "Deployment" + name: "gke-cloud-sql-app" # named 'gke-cloud-sql-app' + instances: + - connectionString: "my-project:us-central1:instance" # from your Cloud SQL Database instance + portEnvName: "DB_PORT" # Will set an env var named 'DB_PORT' to the database port + hostEnvName: "DB_HOST" # Will set an env var named 'DB_HOST' to the proxy's host, 127.0.0.1 +--- +## +# Put the database name, username, and password into a kubernetes secret +# Update the values below as needed for your environment +# +# WARNING: Do not store passwords in a source code file. It is a bad +# way to keep your secrets safe. +# +# Instead, use kubectl to create the secret using an interactive command +# so that your password is not stored in your source code. +# +# kubectl create secret generic gke-cloud-sql-operator-demo \ +# --from-literal=DB_NAME=your_db_name \ +# --from-literal=DB_USER=your_db_user \ +# --from-literal=DB_PASS=your_db_password +# +apiVersion: v1 +kind: Secret +metadata: + name: gke-cloud-sql-operator-demo +type: Opaque +data: + DB_PASS: cGFzc3dvcmQ= # "password" + DB_NAME: cG9zdGdyZXM= # "postgres" + DB_USER: dGVzdHVzZXI= # "testuser" +--- +## +# Create a deployment for your application that uses environment variables +# set by the proxy to connect to the database. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: gke-cloud-sql-app +spec: + selector: + matchLabels: + app: gke-cloud-sql-app + template: + metadata: + labels: + app: gke-cloud-sql-app + spec: + containers: + - name: gke-cloud-sql-app + image: mcr.microsoft.com/mssql-tools + livenessProbe: + initialDelaySeconds: 60 + periodSeconds: 30 + failureThreshold: 3 + exec: + command: ["/bin/sh", "-x", "-c", "/opt/mssql-tools/bin/sqlcmd -S \"tcp:$DB_HOST,$DB_PORT\" -U $DB_USER -P $DB_PASS -Q \"use $DB_NAME ; select 1 ;\""] + command: + - "/bin/sh" + - "-e" + - "-c" + - "sleep 10 ; /opt/mssql-tools/bin/sqlcmd -S \"tcp:$DB_HOST,$DB_PORT\" -U $DB_USER -P $DB_PASS -Q \"use $DB_NAME ; select 1 ;\" ; sleep 3600" + env: + - name: DB_HOST + value: "set-by-operator" + - name: DB_PORT + value: "set-by-operator" + - name: DB_USER + valueFrom: + secretKeyRef: + name: gke-cloud-sql-operator-demo + key: DB_USER + - name: DB_PASS + valueFrom: + secretKeyRef: + name: gke-cloud-sql-operator-demo + key: DB_PASS + - name: DB_NAME + valueFrom: + secretKeyRef: + name: gke-cloud-sql-operator-demo + key: DB_NAME diff --git a/docs/examples/deployment-mysql-tcp.yaml b/docs/examples/deployment-mysql-tcp.yaml new file mode 100644 index 00000000..7c05c6d1 --- /dev/null +++ b/docs/examples/deployment-mysql-tcp.yaml @@ -0,0 +1,110 @@ +# Copyright 2023 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +### +# This example demonstrates how to use environment variables set by the +# Cloud SQL Proxy Operator to connect to your database. + +## +# Create an AuthProxyWorkload to hold the configuration for your +# Cloud SQL Proxy containers. + +apiVersion: cloudsql.cloud.google.com/v1alpha1 +kind: AuthProxyWorkload +metadata: + name: authproxyworkload-sample +spec: + workloadSelector: + kind: "Deployment" # Applies to a "Deployment" + name: "gke-cloud-sql-app" # named 'gke-cloud-sql-app' + instances: + - connectionString: "my-project:us-central1:instance" # from your Cloud SQL Database instance + portEnvName: "DB_PORT" # Will set an env var named 'DB_PORT' to the database port + hostEnvName: "DB_HOST" # Will set an env var named 'DB_HOST' to the proxy's host, 127.0.0.1 +--- +## +# Put the database name, username, and password into a kubernetes secret +# Update the values below as needed for your environment +# +# WARNING: Do not store passwords in a source code file. It is a bad +# way to keep your secrets safe. +# +# Instead, use kubectl to create the secret using an interactive command +# so that your password is not stored in your source code. +# +# kubectl create secret generic gke-cloud-sql-operator-demo \ +# --from-literal=DB_NAME=your_db_name \ +# --from-literal=DB_USER=your_db_user \ +# --from-literal=DB_PASS=your_db_password +# +apiVersion: v1 +kind: Secret +metadata: + name: gke-cloud-sql-operator-demo +type: Opaque +data: + DB_PASS: cGFzc3dvcmQ= # "password" + DB_NAME: cG9zdGdyZXM= # "postgres" + DB_USER: dGVzdHVzZXI= # "testuser" +--- +## +# Create a deployment for your application that uses environment variables +# set by the proxy to connect to the database. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: gke-cloud-sql-app +spec: + selector: + matchLabels: + app: gke-cloud-sql-app + template: + metadata: + labels: + app: gke-cloud-sql-app + spec: + containers: + - name: gke-cloud-sql-app + image: mysql + livenessProbe: + initialDelaySeconds: 60 + periodSeconds: 30 + failureThreshold: 3 + exec: + command: ["/bin/sh", "-x", "-c", "mysql --host=$DB_HOST --port=$DB_PORT --user=$DB_USER --password=$DB_PASS --database=$DB_NAME '--execute=select now()'"] + command: + - "/bin/sh" + - "-e" + - "-c" + - "sleep 10 ; mysql --host=$DB_HOST --port=$DB_PORT --user=$DB_USER --password=$DB_PASS --database=$DB_NAME '--execute=select now()' ; sleep 3600" + env: + - name: DB_HOST + value: "set-by-operator" + - name: DB_PORT + value: "set-by-operator" + - name: DB_USER + valueFrom: + secretKeyRef: + name: gke-cloud-sql-operator-demo + key: DB_USER + - name: DB_PASS + valueFrom: + secretKeyRef: + name: gke-cloud-sql-operator-demo + key: DB_PASS + - name: DB_NAME + valueFrom: + secretKeyRef: + name: gke-cloud-sql-operator-demo + key: DB_NAME From 8e990ca700ea920295821d33dc9b4f36c94b0553 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Thu, 2 Feb 2023 11:51:24 -0700 Subject: [PATCH 13/29] test: Add E2E db connection tests for mysql and mssql (#194) Adds terraform code and E2E test cases that connect and run a query on MySQL and MS SQL Server databases.
Related-to: #52 --- infra/resources/database.tf | 57 +++++++++++ infra/resources/main.tf | 25 ++++- internal/testhelpers/resources.go | 86 +++++++++++++--- internal/testhelpers/testcases.go | 1 + tests/e2e_test.go | 165 ++++++++++++++++++------------ tests/setup_test.go | 68 +++++++----- 6 files changed, 293 insertions(+), 109 deletions(-) diff --git a/infra/resources/database.tf b/infra/resources/database.tf index b653f16d..2180c4f3 100644 --- a/infra/resources/database.tf +++ b/infra/resources/database.tf @@ -42,6 +42,63 @@ resource "google_sql_database" "db" { project = var.project_id } +# See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version +resource "google_sql_database_instance" "mysql" { + name = "mysql${random_id.db_name.hex}" + project = var.project_id + region = var.gcloud_region + database_version = "MYSQL_8_0" + settings { + tier = "db-f1-micro" + user_labels = local.standard_labels + } + deletion_protection = "true" + root_password = random_id.db_password.hex +} + +resource "google_sql_database" "db_mysql" { + name = "db" + instance = google_sql_database_instance.mysql.name + project = var.project_id +} + +resource "google_sql_user" "mysql_user" { + name = "dbuser" + instance = google_sql_database_instance.mysql.name + host = "%" + password = random_id.db_password.hex + project = var.project_id +} + +# See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version +resource "google_sql_database_instance" "mssql" { + name = "mssql${random_id.db_name.hex}" + project = var.project_id + region = var.gcloud_region + database_version = "SQLSERVER_2019_EXPRESS" + settings { + # SQL Server tier format: "db-custom-$CPU-$MEM" where $CPU is the number of + # cores and $MEM is memory in MiB + tier = "db-custom-2-3840" + user_labels = local.standard_labels + } + deletion_protection = "true" + root_password = 
random_id.db_password.hex +} + +resource "google_sql_database" "db_mssql" { + name = "db" + instance = google_sql_database_instance.mssql.name + project = var.project_id +} + +resource "google_sql_user" "mssql_user" { + name = "dbuser" + instance = google_sql_database_instance.mssql.name + password = random_id.db_password.hex + project = var.project_id +} + output "db_root_password" { value = random_id.db_password.hex } diff --git a/infra/resources/main.tf b/infra/resources/main.tf index 79c91729..d0982b13 100644 --- a/infra/resources/main.tf +++ b/infra/resources/main.tf @@ -42,10 +42,27 @@ locals { # First, create the output data structure as a local variable locals { output_json = { - instance = google_sql_database_instance.instance.connection_name - db = google_sql_database.db.name - rootPassword = random_id.db_password.hex - kubeconfig = var.kubeconfig_path + public = { + postgres = { + instance = google_sql_database_instance.instance.connection_name + dbName = google_sql_database.db.name + rootUser = "postgres" + rootPassword = random_id.db_password.hex + } + mysql = { + instance = google_sql_database_instance.mysql.connection_name + dbName = google_sql_database.db.name + rootUser = google_sql_user.mysql_user.name + rootPassword = google_sql_user.mysql_user.password + } + mssql = { + instance = google_sql_database_instance.mssql.connection_name + dbName = google_sql_database.db.name + rootUser = google_sql_user.mssql_user.name + rootPassword = google_sql_user.mssql_user.password + } + kubeconfig = var.kubeconfig_path + } } } diff --git a/internal/testhelpers/resources.go b/internal/testhelpers/resources.go index 1c59366d..7b8687a8 100644 --- a/internal/testhelpers/resources.go +++ b/internal/testhelpers/resources.go @@ -26,6 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" 
"sigs.k8s.io/controller-runtime/pkg/client" @@ -49,9 +50,15 @@ func buildPodTemplateSpec(mainPodSleep int, appLabel string) corev1.PodTemplateS } } +const ( + userKey = "DB_USER" + passwordKey = "DB_PASS" + dbNameKey = "DB_NAME" +) + // BuildSecret creates a Secret object containing database information to be used // by the pod to connect to the database. -func BuildSecret(secretName, userKey, user, passwordKey, password, dbNameKey, dbName string) corev1.Secret { +func BuildSecret(secretName, user, password, dbName string) corev1.Secret { return corev1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -72,28 +79,75 @@ func BuildSecret(secretName, userKey, user, passwordKey, password, dbNameKey, db // BuildPgPodSpec creates a podspec specific to Postgres databases that will connect // and run a trivial query. It also configures the pod's Liveness probe so that // the pod's `Ready` condition is `Ready` when the database can connect. -func BuildPgPodSpec(mainPodSleep int, appLabel, secretName, userKey, passwordKey, dbNameKey string) corev1.PodTemplateSpec { - podCmd := fmt.Sprintf(`echo Container 1 is Running -sleep 10 -psql --host=$DB_HOST --port=$DB_PORT --username=$DB_USER '--command=select 1' --echo-queries --dbname=$DB_NAME -sleep %d`, mainPodSleep) +func BuildPgPodSpec(mainPodSleep int, appLabel, secretName string) corev1.PodTemplateSpec { + const ( + livenessCmd = "psql --host=$DB_HOST --port=$DB_PORT --username=$DB_USER '--command=select 1' --echo-queries --dbname=$DB_NAME" + imageName = "postgres" + passEnvVarName = "PGPASSWORD" + ) + + return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) +} + +// BuildMySQLPodSpec creates a podspec specific to MySQL databases that will connect +// and run a trivial query. It also configures the pod's Liveness probe so that +// the pod's `Ready` condition is `Ready` when the database can connect. 
+func BuildMySQLPodSpec(mainPodSleep int, appLabel, secretName string) corev1.PodTemplateSpec { + const ( + livenessCmd = "mysql --host=$DB_HOST --port=$DB_PORT --user=$DB_USER --password=$DB_PASS --database=$DB_NAME '--execute=select now()' " + imageName = "mysql" + passEnvVarName = "DB_PASS" + ) + + return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) +} - livenessCmd := "psql --host=$DB_HOST --port=$DB_PORT --username=$DB_USER '--command=select 1' --echo-queries --dbname=$DB_NAME" +// BuildMSSQLPodSpec creates a podspec specific to MySQL databases that will connect +// and run a trivial query. It also configures the pod's Liveness probe so that +// the pod's `Ready` condition is `Ready` when the database can connect. +func BuildMSSQLPodSpec(mainPodSleep int, appLabel, secretName string) corev1.PodTemplateSpec { + const ( + livenessCmd = `/opt/mssql-tools/bin/sqlcmd -S "tcp:$DB_HOST,$DB_PORT" -U $DB_USER -P $DB_PASS -Q "use $DB_NAME ; select 1 ;"` + imageName = "mcr.microsoft.com/mssql-tools" + passEnvVarName = "DB_PASS" + ) + + return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) +} + +func buildConnectPodSpec(mainPodSleep int, appLabel, secretName, livenessCmd, passEnvVarName, imageName string) corev1.PodTemplateSpec { + podCmd := fmt.Sprintf(`echo Container 1 is Running + sleep 30 + %s + sleep %d`, livenessCmd, mainPodSleep) return corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": appLabel}, }, + Spec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: "db-client-app", - Image: "postgres", + Image: imageName, ImagePullPolicy: "IfNotPresent", Command: []string{"/bin/sh", "-e", "-x", "-c", podCmd}, - LivenessProbe: &corev1.Probe{InitialDelaySeconds: 60, PeriodSeconds: 30, FailureThreshold: 3, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: 
*resource.NewMilliQuantity(500, resource.DecimalExponent), + }, + }, + LivenessProbe: &corev1.Probe{InitialDelaySeconds: 10, PeriodSeconds: 30, FailureThreshold: 3, ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: []string{"/bin/sh", "-c", "-e", livenessCmd}, + Command: []string{"/bin/sh", "-e", "-c", livenessCmd}, + }, + }, + }, + ReadinessProbe: &corev1.Probe{InitialDelaySeconds: 10, PeriodSeconds: 30, FailureThreshold: 3, + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-e", "-c", livenessCmd}, }, }, }, @@ -108,7 +162,7 @@ sleep %d`, mainPodSleep) }, }, { - Name: "PGPASSWORD", + Name: passEnvVarName, ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, @@ -530,6 +584,7 @@ func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) * Namespace: key.Namespace, }, Spec: v1alpha1.AuthProxyWorkloadSpec{ + Instances: []v1alpha1.InstanceSpec{{ ConnectionString: connectionString, HostEnvName: "DB_HOST", @@ -548,7 +603,14 @@ func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types MatchLabels: map[string]string{"app": appLabel}, }, } - proxy.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{Image: cc.ProxyImageURL} + proxy.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{ + Image: cc.ProxyImageURL, + Resources: &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalExponent), + }, + }, + } err := cc.Client.Create(ctx, proxy) if err != nil { return fmt.Errorf("Unable to create entity %v", err) diff --git a/internal/testhelpers/testcases.go b/internal/testhelpers/testcases.go index 992e5c89..d27784c3 100644 --- a/internal/testhelpers/testcases.go +++ b/internal/testhelpers/testcases.go @@ -31,6 +31,7 @@ type TestCaseClient struct { Namespace string 
ConnectionString string ProxyImageURL string + DBRootUsername string DBRootPassword string DBName string } diff --git a/tests/e2e_test.go b/tests/e2e_test.go index 8bde9037..45b8793d 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -22,6 +22,7 @@ import ( "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/testhelpers" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -44,7 +45,7 @@ func TestMain(m *testing.M) { func TestCreateAndDeleteResource(t *testing.T) { ctx := testContext() - tcc := newTestCaseClient("create") + tcc := newPublicPostgresClient("create") res, err := tcc.CreateResource(ctx) if err != nil { t.Error(err) @@ -104,7 +105,7 @@ func TestProxyAppliedOnNewWorkload(t *testing.T) { ctx := testContext() kind := test.o.GetObjectKind().GroupVersionKind().Kind - tp := newTestCaseClient("new" + strings.ToLower(kind)) + tp := newPublicPostgresClient("new" + strings.ToLower(kind)) err := tp.CreateOrPatchNamespace(ctx) if err != nil { @@ -203,7 +204,7 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { ctx := testContext() kind := test.o.Object().GetObjectKind().GroupVersionKind().Kind - tp := newTestCaseClient("modify" + strings.ToLower(kind)) + tp := newPublicPostgresClient("modify" + strings.ToLower(kind)) err := tp.CreateOrPatchNamespace(ctx) if err != nil { @@ -279,84 +280,112 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { } } -func TestPostgresConnection(t *testing.T) { +func TestPublicDBConnections(t *testing.T) { // When running tests during development, set the SKIP_CLEANUP=true envvar so that // the test namespace remains after the test ends. By default, the test // namespace will be deleted when the test exits. 
skipCleanup := loadValue("SKIP_CLEANUP", "", "false") == "true" - - ctx := testContext() - - tp := newTestCaseClient("pgconnection") - - err := tp.CreateOrPatchNamespace(ctx) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - if skipCleanup { - return - } - - err = tp.DeleteNamespace(ctx) - if err != nil { - t.Fatal(err) - } - }) - const ( pwlName = "newss" - appLabel = "pgsql" + appLabel = "client" kind = "Deployment" ) - key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} - - s := testhelpers.BuildSecret("db-secret", - "DB_USER", "postgres", - "DB_PASS", tp.DBRootPassword, - "DB_NAME", tp.DBName) - s.SetNamespace(tp.Namespace) - err = tp.Client.Create(ctx, &s) - if err != nil { - t.Fatal(err) + + tests := []struct { + name string + c *testhelpers.TestCaseClient + podTemplate corev1.PodTemplateSpec + allOrAny string + }{ + { + name: "postgres", + c: newPublicPostgresClient("postgresconn"), + podTemplate: testhelpers.BuildPgPodSpec(600, appLabel, "db-secret"), + allOrAny: "all", + }, + { + name: "mysql", + c: newPublicMySQLClient("mysqlconn"), + podTemplate: testhelpers.BuildMySQLPodSpec(600, appLabel, "db-secret"), + allOrAny: "all", + }, + { + name: "mssql", + c: newPublicMSSQLClient("mssqlconn"), + podTemplate: testhelpers.BuildMSSQLPodSpec(600, appLabel, "db-secret"), + allOrAny: "all", + }, } + for i := range tests { + test := tests[i] + t.Run(test.name, func(t *testing.T) { + t.Parallel() + ctx := testContext() + tp := test.c - wl := &workload.DeploymentWorkload{Deployment: testhelpers.BuildDeployment(types.NamespacedName{}, appLabel)} - wl.Deployment.Spec.Template = testhelpers.BuildPgPodSpec(600, - appLabel, "db-secret", "DB_USER", "DB_PASS", "DB_NAME") - t.Log("Creating AuthProxyWorkload") + err := tp.CreateOrPatchNamespace(ctx) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if skipCleanup { + return + } - err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) - if err != nil { - t.Fatal(err) - 
} + err = tp.DeleteNamespace(ctx) + if err != nil { + t.Fatal(err) + } + }) - t.Log("Waiting for AuthProxyWorkload operator to begin the reconcile loop") - _, err = tp.GetAuthProxyWorkloadAfterReconcile(ctx, key) - if err != nil { - t.Fatal("unable to create AuthProxyWorkload", err) - } + key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} - t.Log("Creating ", kind) - wl.Object().SetNamespace(tp.Namespace) - wl.Object().SetName(pwlName) - err = tp.CreateWorkload(ctx, wl.Object()) - if err != nil { - t.Fatal("unable to create ", kind, err) - } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": appLabel}, - } - t.Log("Checking for container counts", kind) - err = tp.ExpectPodContainerCount(ctx, selector, 2, "all") - if err != nil { - t.Error(err) - } - t.Log("Checking for ready", kind) - err = tp.ExpectPodReady(ctx, selector, "all") - if err != nil { - t.Error(err) + s := testhelpers.BuildSecret("db-secret", tp.DBRootUsername, tp.DBRootPassword, tp.DBName) + s.SetNamespace(tp.Namespace) + err = tp.Client.Create(ctx, &s) + if err != nil { + t.Fatal(err) + } + + wl := &workload.DeploymentWorkload{Deployment: testhelpers.BuildDeployment(types.NamespacedName{}, appLabel)} + wl.Deployment.Spec.Template = test.podTemplate + t.Log("Creating AuthProxyWorkload") + + err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + if err != nil { + t.Fatal(err) + } + + t.Log("Waiting for AuthProxyWorkload operator to begin the reconcile loop") + _, err = tp.GetAuthProxyWorkloadAfterReconcile(ctx, key) + if err != nil { + t.Fatal("unable to create AuthProxyWorkload", err) + } + + t.Log("Creating ", kind) + wl.Object().SetNamespace(tp.Namespace) + wl.Object().SetName(pwlName) + err = tp.CreateWorkload(ctx, wl.Object()) + if err != nil { + t.Fatal("unable to create ", kind, err) + } + selector := &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": appLabel}, + } + t.Log("Checking for container counts", kind) + 
err = tp.ExpectPodContainerCount(ctx, selector, 2, "all") + if err != nil { + t.Error(err) + } + t.Log("Checking for ready", kind) + err = tp.ExpectPodReady(ctx, selector, "all") + if err != nil { + t.Error(err) + } + + t.Log("Done, OK", kind) + + }) } - t.Log("Done, OK", kind) } diff --git a/tests/setup_test.go b/tests/setup_test.go index cd2a06ab..9244c957 100644 --- a/tests/setup_test.go +++ b/tests/setup_test.go @@ -53,13 +53,24 @@ var ( operatorURL string ) -func newTestCaseClient(ns string) *testhelpers.TestCaseClient { +func newPublicPostgresClient(ns string) *testhelpers.TestCaseClient { + return newTestClient(ns, infra.Public.Postgres) +} +func newPublicMySQLClient(ns string) *testhelpers.TestCaseClient { + return newTestClient(ns, infra.Public.MySQL) +} +func newPublicMSSQLClient(ns string) *testhelpers.TestCaseClient { + return newTestClient(ns, infra.Public.MSSQL) +} + +func newTestClient(ns string, db testDatabase) *testhelpers.TestCaseClient { return &testhelpers.TestCaseClient{ Client: c, Namespace: testhelpers.NewNamespaceName(ns), - ConnectionString: infra.InstanceConnectionString, - DBRootPassword: infra.RootPassword, - DBName: infra.DB, + ConnectionString: db.InstanceConnectionString, + DBRootUsername: db.RootUser, + DBRootPassword: db.RootPassword, + DBName: db.DBName, ProxyImageURL: proxyImageURL, } } @@ -104,38 +115,34 @@ func setupTests() (func(), error) { testInfraPath := loadValue("TEST_INFRA_JSON", "", "../bin/testinfra.json") ti, err := loadTestInfra(testInfraPath) if err != nil { - kubeconfig := "../../bin/e2e-kubeconfig.yaml" - if envKubeConfig, isset := os.LookupEnv("KUBECONFIG"); isset { - kubeconfig = envKubeConfig - } - ti.Kubeconfig = kubeconfig - ti.DB = "db" - ti.InstanceConnectionString = "proj:region:inst" - logger.Info("Test infrastructure not set. 
Using defaults", - "instance", ti.InstanceConnectionString, - "db", ti.DB, - "kubeconfig", ti.Kubeconfig) + return teardownFunc, err } infra = ti + setupKubernetesClient(ctx, infra.Public) + + return cancelFunc, nil +} + +func setupKubernetesClient(ctx context.Context, ti testEnvironment) error { // Build the kubernetes client config, err := clientcmd.BuildConfigFromFlags("", ti.Kubeconfig) if err != nil { - return teardownFunc, fmt.Errorf("unable to build kubernetes client for config %s, %v", ti.Kubeconfig, err) + return fmt.Errorf("unable to build kubernetes client for config %s, %v", ti.Kubeconfig, err) } config.RateLimiter = nil k8sClientSet, err = kubernetes.NewForConfig(config) if err != nil { - return teardownFunc, fmt.Errorf("unable to setup e2e kubernetes client %v", err) + return fmt.Errorf("unable to setup e2e kubernetes client %v", err) } s := scheme.Scheme controller.InitScheme(s) c, err = client.New(config, client.Options{Scheme: s}) if err != nil { - return teardownFunc, fmt.Errorf("Unable to initialize kubernetes client %{v}", err) + return fmt.Errorf("Unable to initialize kubernetes client %{v}", err) } if c == nil { - return teardownFunc, fmt.Errorf("Kubernetes client was empty after initialization %v", err) + return fmt.Errorf("Kubernetes client was empty after initialization %v", err) } // Check that the e2e k8s cluster is the operator that was last built from @@ -143,7 +150,7 @@ func setupTests() (func(), error) { d, err := waitForCorrectOperatorPods(ctx, err) if err != nil { - return teardownFunc, fmt.Errorf("unable to find manager deployment %v", err) + return fmt.Errorf("unable to find manager deployment %v", err) } // Start the goroutines to tail the logs from the operator deployment. This @@ -151,13 +158,13 @@ func setupTests() (func(), error) { // for the developer to follow. 
 podList, err := testhelpers.ListPods(ctx, c, d.GetNamespace(), d.Spec.Selector) if err != nil { - return teardownFunc, fmt.Errorf("unable to find manager deployment %v", err) + return fmt.Errorf("unable to find manager deployment %v", err) } tailPods(ctx, podList) logger.Info("Setup complete. K8s cluster is running.") - return teardownFunc, nil + return nil } func waitForCorrectOperatorPods(ctx context.Context, err error) (*appsv1.Deployment, error) { @@ -258,11 +265,22 @@ func (l *testSetupLogger) Logf(format string, args ...interface{}) { } func (l *testSetupLogger) Helper() {} -type testInfra struct { +type testDatabase struct { InstanceConnectionString string `json:"instance,omitempty"` - DB string `json:"db,omitempty"` + DBName string `json:"dbName,omitempty"` + RootUser string `json:"rootUser,omitempty"` RootPassword string `json:"rootPassword,omitempty"` - Kubeconfig string `json:"kubeconfig,omitempty"` +} + +type testInfra struct { + Public testEnvironment `json:"public"` +} + +type testEnvironment struct { + Postgres testDatabase `json:"postgres,omitempty"` + MySQL testDatabase `json:"mysql,omitempty"` + MSSQL testDatabase `json:"mssql,omitempty"` + Kubeconfig string `json:"kubeconfig,omitempty"` } func loadTestInfra(testInfraJSON string) (testInfra, error) { From 36937c72ef728e7dd682f4082df1905102459fc2 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 7 Feb 2023 14:45:20 -0700 Subject: [PATCH 14/29] chore: update version of controller-gen to 0.11.3 The latest version of controller-gen is now version 0.11.3 --- .../crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml | 2 +- installer/cloud-sql-proxy-operator.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index 38ab9154..0f1720df 100644 --- 
a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -15,7 +15,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.2 + controller-gen.kubebuilder.io/version: v0.11.3 creationTimestamp: null name: authproxyworkloads.cloudsql.cloud.google.com spec: diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 8293701e..811f4365 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -24,7 +24,7 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: cloud-sql-proxy-operator-system/cloud-sql-proxy-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.11.2 + controller-gen.kubebuilder.io/version: v0.11.3 name: authproxyworkloads.cloudsql.cloud.google.com spec: conversion: From 3b0359b68b8d5c0dcd3e306102945c6e608ff095 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 7 Feb 2023 15:07:51 -0700 Subject: [PATCH 15/29] feat: Automatically trigger pod rollout for appsv1 resources when AuthProxyWorkload changes. (#197) Given an AuthProxyWorkload resource that has been applied to StatefulSet, DaemonSet, ReplicaSet, and/or Deployment workloads, when that AuthProxyWorkload is updated, the operator will add or modify an annotation on the PodTemplateSpec of that workload, causing k8s to replace the workload's pods following the workload's rollout strategy, thus applying the AuthProxyWorkload's configuration change. Related to #187. 
--- config/rbac/role.yaml | 10 ++ installer/cloud-sql-proxy-operator.yaml | 10 ++ .../api/v1alpha1/authproxyworkload_types.go | 6 + .../authproxyworkload_controller.go | 123 +++++++++++++++--- .../authproxyworkload_controller_test.go | 50 ++++++- internal/workload/podspec_updates.go | 22 ++++ internal/workload/podspec_updates_test.go | 35 +++++ tests/e2e_test.go | 16 --- 8 files changed, 231 insertions(+), 41 deletions(-) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7f68c2a7..bc0f82c2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -45,6 +45,16 @@ rules: - get - list - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - patch + - update - apiGroups: - batch resources: diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 811f4365..c58e32e7 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -1161,6 +1161,16 @@ rules: - get - list - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - patch + - update - apiGroups: - batch resources: diff --git a/internal/api/v1alpha1/authproxyworkload_types.go b/internal/api/v1alpha1/authproxyworkload_types.go index 86ddfeeb..04c73482 100644 --- a/internal/api/v1alpha1/authproxyworkload_types.go +++ b/internal/api/v1alpha1/authproxyworkload_types.go @@ -45,6 +45,12 @@ const ( // when the resource reconcile has finished running. ReasonFinishedReconcile = "FinishedReconcile" + // ReasonWorkloadNeedsUpdate relates to condition UpToDate, this reason is set + // when the resource reconcile found existing workloads related to this + // AuthProxyWorkload resource that are not yet configured with an up-to-date + // proxy configuration. 
+ ReasonWorkloadNeedsUpdate = "WorkloadNeedsUpdate" + // ReasonNoWorkloadsFound relates to condition UpToDate, this reason is set // when there are no workloads related to this AuthProxyWorkload resource. ReasonNoWorkloadsFound = "NoWorkloadsFound" diff --git a/internal/controller/authproxyworkload_controller.go b/internal/controller/authproxyworkload_controller.go index d5168d43..e3a9ad5f 100644 --- a/internal/controller/authproxyworkload_controller.go +++ b/internal/controller/authproxyworkload_controller.go @@ -94,6 +94,7 @@ func (r *AuthProxyWorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } +//+kubebuilder:rbac:groups=apps,resources=deployments;statefulsets;daemonsets;replicasets,verbs=update;patch //+kubebuilder:rbac:groups=apps,resources=*,verbs=get;list;watch //+kubebuilder:rbac:groups=batch,resources=*,verbs=get;list;watch //+kubebuilder:rbac:groups="",resources=*,verbs=get;list;watch @@ -198,19 +199,24 @@ func (r *AuthProxyWorkloadReconciler) doDelete(ctx context.Context, resource *cl // - the condition `UpToDate` status and reason // // States: -// | state | finalizer| fetch err | len(wl) | Name | -// |---------|----------|-----------|--------------|--------------------- | -// | 0 | * | * | * | start | -// | 1.1 | absent | * | * | needs finalizer | -// | 1.2 | present | error | * | can't list workloads | -// | 2.1 | present | nil | == 0 | no workloads to reconcile | -// | 3.1 | present | nil | > 0 | workloads reconciled | +// | state | finalizer| fetch err | len(wl) | outOfDateCount | Name | +// |---------|----------|-----------|---------|----------------|---------------------------------------| +// | 0 | * | * | * | | start | +// | 1.1 | absent | * | * | | needs finalizer | +// | 1.2 | present | error | * | | can't list workloads | +// | 2.1 | present | nil | == 0 | | no workloads to reconcile | +// | 3.1 | present | nil | > 0 | > 0 , err | workload update needed, and failed | +// | 3.2 | present | nil | > 0 | > 0 | workload 
update needed, and succeeded | +// | 3.3 | present | nil | > 0 | == 0 | workloads reconciled | // -// start ---x -// \---> 1.1 --> (requeue, goto start) -// \---> 1.2 --> (requeue, goto start) -// \---> 2.1 --> (end) -// \---> 3.1 --> (end) +// start ----x +// |---> 1.1 --> (requeue, goto start) +// |---> 1.2 --> (requeue, goto start) +// |---> 2.1 --> (end) +// | +// |---> 3.1 ---> (requeue, goto start) +// |---> 3.2 ---> (requeue, goto start) +// |---> 3.3 ---> (end) func (r *AuthProxyWorkloadReconciler) doCreateUpdate(ctx context.Context, l logr.Logger, resource *cloudsqlapi.AuthProxyWorkload) (ctrl.Result, error) { orig := resource.DeepCopy() var err error @@ -236,36 +242,111 @@ func (r *AuthProxyWorkloadReconciler) doCreateUpdate(ctx context.Context, l logr // State 2.1: When there are no workloads, then mark this as "UpToDate" true, // do not requeue. if len(allWorkloads) == 0 { - return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonNoWorkloadsFound, "No workload updates needed") + return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonNoWorkloadsFound, "No workload updates needed", true) } - // State 3.1: Workload updates are in progress. Check if the workload updates - // are complete. - // + // State 3.*: Workloads already exist. Some may need to be updated to roll out + // changes. + var outOfDateCount int + for _, wl := range allWorkloads { + wlChanged := r.needsAnnotationUpdate(wl, resource) + if !wlChanged { + continue + } + + outOfDateCount++ + _, err = controllerutil.CreateOrPatch(ctx, r.Client, wl.Object(), func() error { + r.updateAnnotation(wl, resource) + return nil + }) + + // State 3.1 Failed to update one of the workloads PodTemplateSpec annotations, requeue. + if err != nil { + message := fmt.Sprintf("Reconciled %d matching workloads. 
Error updating workload %v: %v", len(allWorkloads), wl.Object().GetName(), err) + return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonWorkloadNeedsUpdate, message, false) + } + + } + + // State 3.2 Successfully updated all workload PodTemplateSpec annotations, requeue + if outOfDateCount > 0 { + message := fmt.Sprintf("Reconciled %d matching workloads. %d workloads need updates", len(allWorkloads), outOfDateCount) + return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonWorkloadNeedsUpdate, message, false) + } + + // State 3.3 Workload PodTemplateSpec annotations are all up to date message := fmt.Sprintf("Reconciled %d matching workloads complete", len(allWorkloads)) + return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonFinishedReconcile, message, true) +} + +// needsAnnotationUpdate returns true when the workload was annotated with +// a different generation of the resource. +func (r *AuthProxyWorkloadReconciler) needsAnnotationUpdate(wl workload.Workload, resource *cloudsqlapi.AuthProxyWorkload) bool { + + // This workload is not mutable. Ignore it. + if _, ok := wl.(workload.WithMutablePodTemplate); !ok { + return false + } - return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonFinishedReconcile, message) + k, v := workload.PodAnnotation(resource) + + // Check if the correct annotation exists + an := wl.PodTemplateAnnotations() + if an != nil && an[k] == v { + return false + } + + return true +} + +// updateAnnotation applies an annotation to the workload for the resource. +func (r *AuthProxyWorkloadReconciler) updateAnnotation(wl workload.Workload, resource *cloudsqlapi.AuthProxyWorkload) { + mpt, ok := wl.(workload.WithMutablePodTemplate) + + // This workload is not mutable. Ignore it. + if !ok { + return + } + + k, v := workload.PodAnnotation(resource) + + // add the annotation if needed... 
+ an := wl.PodTemplateAnnotations() + if an == nil { + an = make(map[string]string) + } + + an[k] = v + mpt.SetPodTemplateAnnotations(an) } // workloadsReconciled State 3.1: If workloads are all up to date, mark the condition // "UpToDate" true and do not requeue. -func (r *AuthProxyWorkloadReconciler) reconcileResult(ctx context.Context, l logr.Logger, resource *cloudsqlapi.AuthProxyWorkload, orig *cloudsqlapi.AuthProxyWorkload, reason, message string) (ctrl.Result, error) { +func (r *AuthProxyWorkloadReconciler) reconcileResult(ctx context.Context, l logr.Logger, resource, orig *cloudsqlapi.AuthProxyWorkload, reason, message string, upToDate bool) (ctrl.Result, error) { + + status := metav1.ConditionFalse + result := requeueNow + if upToDate { + status = metav1.ConditionTrue + result = ctrl.Result{} + } // Workload updates are complete, update the status resource.Status.Conditions = replaceCondition(resource.Status.Conditions, &metav1.Condition{ Type: cloudsqlapi.ConditionUpToDate, - Status: metav1.ConditionTrue, + Status: status, ObservedGeneration: resource.GetGeneration(), Reason: reason, Message: message, }) + err := r.patchAuthProxyWorkloadStatus(ctx, resource, orig) if err != nil { l.Error(err, "Unable to patch status before beginning workloads", "AuthProxyWorkload", resource.GetNamespace()+"/"+resource.GetName()) - return ctrl.Result{}, err + return result, err } - return ctrl.Result{}, nil + return result, nil } // applyFinalizer adds the finalizer so that the operator is notified when diff --git a/internal/controller/authproxyworkload_controller_test.go b/internal/controller/authproxyworkload_controller_test.go index 0f5098a9..2e013fc7 100644 --- a/internal/controller/authproxyworkload_controller_test.go +++ b/internal/controller/authproxyworkload_controller_test.go @@ -146,8 +146,51 @@ func TestReconcileState21BySelector(t *testing.T) { } -func TestReconcileState31(t *testing.T) { - var wantRequeue bool +func TestReconcileState32(t *testing.T) { + 
wantRequeue := true + wantStatus := metav1.ConditionFalse + wantReason := v1alpha1.ReasonWorkloadNeedsUpdate + + p := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "project:region:db") + p.Generation = 2 + p.Finalizers = []string{finalizerName} + p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "things"}, + }, + } + p.Status.Conditions = []*metav1.Condition{{ + Type: v1alpha1.ConditionUpToDate, + Reason: v1alpha1.ReasonStartedReconcile, + Status: metav1.ConditionFalse, + }} + + // mimic a pod that was updated by the webhook + reqName := v1alpha1.AnnotationPrefix + "/" + p.Name + pod := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "thing", + Namespace: "default", + Labels: map[string]string{"app": "things"}, + }, + Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{reqName: "1"}}, + }}, + } + + err := runReconcileTestcase(p, []client.Object{p, pod}, wantRequeue, wantStatus, wantReason) + if err != nil { + t.Fatal(err) + } + +} + +func TestReconcileState33(t *testing.T) { + wantRequeue := false wantStatus := metav1.ConditionTrue wantReason := v1alpha1.ReasonFinishedReconcile @@ -170,8 +213,7 @@ func TestReconcileState31(t *testing.T) { }} // mimic a pod that was updated by the webhook - reqName := v1alpha1.AnnotationPrefix + "/" + - workload.SafePrefixedName("req-", p.Namespace+"-"+p.Name) + reqName := v1alpha1.AnnotationPrefix + "/" + p.Name pod := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "thing", diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index 9637dd79..2a175c64 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -47,6 +47,14 @@ const ( var l = logf.Log.WithName("internal.workload") +// PodAnnotation returns the key and 
value for an annotation on a pod +// to indicate the specific AuthProxyWorkload resource that was configured +// on the pod. +func PodAnnotation(r *cloudsqlapi.AuthProxyWorkload) (string, string) { + return fmt.Sprintf("%s/%s", cloudsqlapi.AnnotationPrefix, r.Name), + fmt.Sprintf("%d", r.Generation) +} + // Updater holds global state used while reconciling workloads. type Updater struct { // userAgent is the userAgent of the operator @@ -388,6 +396,12 @@ func (s *updateState) update(wl *PodWorkload, matches []*cloudsqlapi.AuthProxyWo } } + // Copy the existing pod annotation map + ann := map[string]string{} + for k, v := range wl.PodTemplateAnnotations() { + ann[k] = v + } + // add all new containers and update existing containers for i := range matches { inst := matches[i] @@ -395,10 +409,18 @@ func (s *updateState) update(wl *PodWorkload, matches []*cloudsqlapi.AuthProxyWo newContainer := corev1.Container{} s.updateContainer(inst, wl, &newContainer) containers = append(containers, newContainer) + + // Add pod annotation for each instance + k, v := PodAnnotation(inst) + ann[k] = v } podSpec.Containers = containers + if len(ann) != 0 { + wl.SetPodTemplateAnnotations(ann) + } + for i := range podSpec.Containers { c := &podSpec.Containers[i] s.updateContainerEnv(c) diff --git a/internal/workload/podspec_updates_test.go b/internal/workload/podspec_updates_test.go index 813fc06f..763fb811 100644 --- a/internal/workload/podspec_updates_test.go +++ b/internal/workload/podspec_updates_test.go @@ -775,3 +775,38 @@ func assertContainerArgsContains(t *testing.T, gotArgs, wantArgs []string) { } } } + +func TestPodTemplateAnnotations(t *testing.T) { + var ( + wantAnnotations = map[string]string{ + "cloudsql.cloud.google.com/instance1": "1", + "cloudsql.cloud.google.com/instance2": "2", + } + + u = workload.NewUpdater("cloud-sql-proxy-operator/dev") + ) + + // Create a pod + wl := podWorkload() + wl.Pod.Spec.Containers[0].Ports = + []corev1.ContainerPort{{Name: "http", 
ContainerPort: 8080}} + + // Create a AuthProxyWorkload that matches the deployment + csqls := []*v1alpha1.AuthProxyWorkload{ + simpleAuthProxy("instance1", "project:server:db"), + simpleAuthProxy("instance2", "project:server2:db2")} + csqls[0].ObjectMeta.Generation = 1 + csqls[1].ObjectMeta.Generation = 2 + + // update the containers + err := configureProxies(u, wl, csqls) + if err != nil { + t.Fatal(err) + } + + // test that annotation was set properly + if !reflect.DeepEqual(wl.PodTemplateAnnotations(), wantAnnotations) { + t.Errorf("got %v, want %v for proxy container command", wl.PodTemplateAnnotations(), wantAnnotations) + } + +} diff --git a/tests/e2e_test.go b/tests/e2e_test.go index 45b8793d..aa7eacd9 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) func TestMain(m *testing.M) { @@ -255,21 +254,6 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { t.Fatal(err) } - // if this is an apps/v1 resource with a mutable pod template, - // force a rolling update. 
- if wl, ok := test.o.(workload.WithMutablePodTemplate); ok { - // patch the workload, add an annotation to the podspec - t.Log("Customer updates the workload triggering a rollout") - controllerutil.CreateOrPatch(ctx, tp.Client, test.o.Object(), func() error { - wl.SetPodTemplateAnnotations(map[string]string{"customer": "updated"}) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - t.Logf("Wait for %v pods to have 2 containers", test.allOrAny) err = tp.ExpectPodContainerCount(ctx, selector, 2, test.allOrAny) if err != nil { From c523cd047c3f9d747a13d45ecc4b070dfd123437 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Wed, 8 Feb 2023 12:49:49 -0700 Subject: [PATCH 16/29] chore: upgrade to go 1.20 (#201) Upgrades to go 1.20.0 for this go module as well as for all builds. Related to #196 --- .github/renovate.json | 2 +- .github/workflows/build.yaml | 2 +- Dockerfile-operator | 2 +- go.mod | 2 +- tools/e2e_test_job.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/renovate.json b/.github/renovate.json index aaaaeb49..8af344fa 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -16,7 +16,7 @@ ], "force": { "constraints": { - "go": "1.18" + "go": "1.20" } } } diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index bdb11e27..bcfd6b2c 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -42,7 +42,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' - name: Checkout code uses: actions/checkout@v3 with: diff --git a/Dockerfile-operator b/Dockerfile-operator index f1fefcdb..abbacd9a 100644 --- a/Dockerfile-operator +++ b/Dockerfile-operator @@ -13,7 +13,7 @@ # limitations under the License. 
# Use the latest stable golang 1.x to compile to a binary -FROM --platform=$BUILDPLATFORM golang:1 as build +FROM --platform=$BUILDPLATFORM golang:1.20 as build WORKDIR /work COPY . . diff --git a/go.mod b/go.mod index a00c17c3..2e05500d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/GoogleCloudPlatform/cloud-sql-proxy-operator -go 1.18 +go 1.20 require ( github.com/go-logr/logr v1.2.3 diff --git a/tools/e2e_test_job.sh b/tools/e2e_test_job.sh index 9685894c..d2c30954 100644 --- a/tools/e2e_test_job.sh +++ b/tools/e2e_test_job.sh @@ -48,7 +48,7 @@ which helm # Install go echo "TIME: $(date) Install Go" -curl -L -o bin/go.tar.gz https://go.dev/dl/go1.18.10.linux-amd64.tar.gz +curl -L -o bin/go.tar.gz https://go.dev/dl/go1.20.linux-amd64.tar.gz rm -rf /usr/local/go && tar -C /usr/local -xzf bin/go.tar.gz export PATH=$PATH:/usr/local/go/bin go version From e11caed179f82ca3d24322d9f80a95174911bddd Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Wed, 8 Feb 2023 15:07:19 -0700 Subject: [PATCH 17/29] feat: automatic changes to workloads when an AuthProxyWorkload is deleted (#200) When an AuthProxyWorkload is deleted, automatically update all related workloads that support automatic rollout: Deployment, StatefulSet, DaemonSet, ReplicaSet. 
Related to #187 --- .../authproxyworkload_controller.go | 64 ++++--- .../authproxyworkload_controller_test.go | 175 +++++++++++++----- internal/testhelpers/resources.go | 6 +- internal/testhelpers/testcases.go | 2 +- internal/testintegration/integration_test.go | 4 +- internal/workload/podspec_updates.go | 17 +- internal/workload/podspec_updates_test.go | 45 ++++- tests/e2e_test.go | 91 ++++++++- 8 files changed, 311 insertions(+), 93 deletions(-) diff --git a/internal/controller/authproxyworkload_controller.go b/internal/controller/authproxyworkload_controller.go index e3a9ad5f..e808bfce 100644 --- a/internal/controller/authproxyworkload_controller.go +++ b/internal/controller/authproxyworkload_controller.go @@ -156,7 +156,7 @@ func (r *AuthProxyWorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Re "gen", resource.GetGeneration()) r.recentlyDeleted.set(req.NamespacedName, true) // the object has been deleted - return r.doDelete(ctx, resource, l) + return r.doDelete(ctx, resource) } l.Info("Reconcile add/update for AuthProxyWorkload", @@ -169,10 +169,15 @@ func (r *AuthProxyWorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Re // doDelete removes our finalizer and updates the related workloads // when the reconcile loop receives an AuthProxyWorkload that was deleted. 
-func (r *AuthProxyWorkloadReconciler) doDelete(ctx context.Context, resource *cloudsqlapi.AuthProxyWorkload, l logr.Logger) (ctrl.Result, error) { +func (r *AuthProxyWorkloadReconciler) doDelete(ctx context.Context, resource *cloudsqlapi.AuthProxyWorkload) (ctrl.Result, error) { // Mark all related workloads as needing to be updated - _, err := r.updateWorkloadStatus(ctx, l, resource) + allWorkloads, err := r.updateWorkloadStatus(ctx, resource) + if err != nil { + return requeueNow, err + } + + _, err = r.updateWorkloadAnnotations(ctx, resource, allWorkloads) if err != nil { return requeueNow, err } @@ -231,7 +236,7 @@ func (r *AuthProxyWorkloadReconciler) doCreateUpdate(ctx context.Context, l logr } // find all workloads that relate to this AuthProxyWorkload resource - allWorkloads, err := r.updateWorkloadStatus(ctx, l, resource) + allWorkloads, err := r.updateWorkloadStatus(ctx, resource) if err != nil { // State 1.2 - unable to read workloads, abort and try again after a delay. return requeueWithDelay, err @@ -247,25 +252,9 @@ func (r *AuthProxyWorkloadReconciler) doCreateUpdate(ctx context.Context, l logr // State 3.*: Workloads already exist. Some may need to be updated to roll out // changes. - var outOfDateCount int - for _, wl := range allWorkloads { - wlChanged := r.needsAnnotationUpdate(wl, resource) - if !wlChanged { - continue - } - - outOfDateCount++ - _, err = controllerutil.CreateOrPatch(ctx, r.Client, wl.Object(), func() error { - r.updateAnnotation(wl, resource) - return nil - }) - - // State 3.1 Failed to update one of the workloads PodTemplateSpec annotations, requeue. - if err != nil { - message := fmt.Sprintf("Reconciled %d matching workloads. 
Error updating workload %v: %v", len(allWorkloads), wl.Object().GetName(), err) - return r.reconcileResult(ctx, l, resource, orig, cloudsqlapi.ReasonWorkloadNeedsUpdate, message, false) - } - + outOfDateCount, err := r.updateWorkloadAnnotations(ctx, resource, allWorkloads) + if err != nil { + return requeueNow, err } // State 3.2 Successfully updated all workload PodTemplateSpec annotations, requeue @@ -282,14 +271,12 @@ func (r *AuthProxyWorkloadReconciler) doCreateUpdate(ctx context.Context, l logr // needsAnnotationUpdate returns true when the workload was annotated with // a different generation of the resource. func (r *AuthProxyWorkloadReconciler) needsAnnotationUpdate(wl workload.Workload, resource *cloudsqlapi.AuthProxyWorkload) bool { - // This workload is not mutable. Ignore it. if _, ok := wl.(workload.WithMutablePodTemplate); !ok { return false } k, v := workload.PodAnnotation(resource) - // Check if the correct annotation exists an := wl.PodTemplateAnnotations() if an != nil && an[k] == v { @@ -388,16 +375,15 @@ func (r *AuthProxyWorkloadReconciler) patchAuthProxyWorkloadStatus( // updates the needs update annotations using internal.UpdateWorkloadAnnotation. // Once the workload is saved, the workload admission mutate webhook will // apply the correct containers to this instance. -func (r *AuthProxyWorkloadReconciler) updateWorkloadStatus(ctx context.Context, _ logr.Logger, resource *cloudsqlapi.AuthProxyWorkload) (matching []workload.Workload, retErr error) { +func (r *AuthProxyWorkloadReconciler) updateWorkloadStatus(ctx context.Context, resource *cloudsqlapi.AuthProxyWorkload) (matching []workload.Workload, retErr error) { matching, err := r.listWorkloads(ctx, resource.Spec.Workload, resource.GetNamespace()) if err != nil { return nil, err } - // all matching workloads get a new annotation that will be removed - // when the reconcile loop for outOfDate is completed. 
for _, wl := range matching { + // update the status condition for a workload s := newStatus(wl) s.Conditions = replaceCondition(s.Conditions, &metav1.Condition{ Type: cloudsqlapi.ConditionWorkloadUpToDate, @@ -531,3 +517,25 @@ func (r *AuthProxyWorkloadReconciler) loadByLabelSelector(ctx context.Context, w return wl.Workloads(), nil } + +func (r *AuthProxyWorkloadReconciler) updateWorkloadAnnotations(ctx context.Context, resource *cloudsqlapi.AuthProxyWorkload, workloads []workload.Workload) (int, error) { + var outOfDate int + for _, wl := range workloads { + if r.needsAnnotationUpdate(wl, resource) { + outOfDate++ + + _, err := controllerutil.CreateOrPatch(ctx, r.Client, wl.Object(), func() error { + r.updateAnnotation(wl, resource) + return nil + }) + + // Failed to update one of the workloads PodTemplateSpec annotations. + if err != nil { + return 0, fmt.Errorf("reconciled %d matching workloads. Error removing proxy from workload %v: %v", len(workloads), wl.Object().GetName(), err) + } + } + } + + return outOfDate, nil + +} diff --git a/internal/controller/authproxyworkload_controller_test.go b/internal/controller/authproxyworkload_controller_test.go index 2e013fc7..b3c2a319 100644 --- a/internal/controller/authproxyworkload_controller_test.go +++ b/internal/controller/authproxyworkload_controller_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "os" + "strings" "testing" "go.uber.org/zap/zapcore" @@ -69,11 +70,8 @@ func TestReconcileDeleted(t *testing.T) { Namespace: "default", Name: "test", }, "project:region:db") - p.Finalizers = []string{finalizerName} - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ - Kind: "Pod", - Name: "thing", - } + addFinalizers(p) + addPodWorkload(p) cb, err := clientBuilder() if err != nil { @@ -114,11 +112,8 @@ func TestReconcileState21ByName(t *testing.T) { Namespace: "default", Name: "test", }, "project:region:db") - p.Finalizers = []string{finalizerName} - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ - Kind: "Pod", - 
Name: "testpod", - } + addFinalizers(p) + addPodWorkload(p) err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) if err != nil { @@ -131,13 +126,8 @@ func TestReconcileState21BySelector(t *testing.T) { Namespace: "default", Name: "test", }, "project:region:db") - p.Finalizers = []string{finalizerName} - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ - Kind: "Pod", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "things"}, - }, - } + addFinalizers(p) + addSelectorWorkload(p, "Pod", "app", "things") err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) if err != nil { @@ -147,27 +137,20 @@ func TestReconcileState21BySelector(t *testing.T) { } func TestReconcileState32(t *testing.T) { - wantRequeue := true - wantStatus := metav1.ConditionFalse - wantReason := v1alpha1.ReasonWorkloadNeedsUpdate - + const ( + wantRequeue = true + wantStatus = metav1.ConditionFalse + wantReason = v1alpha1.ReasonWorkloadNeedsUpdate + labelK = "app" + labelV = "things" + ) p := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ Namespace: "default", Name: "test", }, "project:region:db") p.Generation = 2 - p.Finalizers = []string{finalizerName} - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ - Kind: "Deployment", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "things"}, - }, - } - p.Status.Conditions = []*metav1.Condition{{ - Type: v1alpha1.ConditionUpToDate, - Reason: v1alpha1.ReasonStartedReconcile, - Status: metav1.ConditionFalse, - }} + addFinalizers(p) + addSelectorWorkload(p, "Deployment", labelK, labelV) // mimic a pod that was updated by the webhook reqName := v1alpha1.AnnotationPrefix + "/" + p.Name @@ -175,7 +158,7 @@ func TestReconcileState32(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "thing", Namespace: "default", - Labels: map[string]string{"app": "things"}, + Labels: 
map[string]string{labelK: labelV}, }, Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{reqName: "1"}}, @@ -190,27 +173,21 @@ func TestReconcileState32(t *testing.T) { } func TestReconcileState33(t *testing.T) { - wantRequeue := false - wantStatus := metav1.ConditionTrue - wantReason := v1alpha1.ReasonFinishedReconcile + const ( + wantRequeue = false + wantStatus = metav1.ConditionTrue + wantReason = v1alpha1.ReasonFinishedReconcile + labelK = "app" + labelV = "things" + ) p := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ Namespace: "default", Name: "test", }, "project:region:db") p.Generation = 1 - p.Finalizers = []string{finalizerName} - p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ - Kind: "Deployment", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "things"}, - }, - } - p.Status.Conditions = []*metav1.Condition{{ - Type: v1alpha1.ConditionUpToDate, - Reason: v1alpha1.ReasonStartedReconcile, - Status: metav1.ConditionFalse, - }} + addFinalizers(p) + addSelectorWorkload(p, "Deployment", labelK, labelV) // mimic a pod that was updated by the webhook reqName := v1alpha1.AnnotationPrefix + "/" + p.Name @@ -218,7 +195,7 @@ func TestReconcileState33(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "thing", Namespace: "default", - Labels: map[string]string{"app": "things"}, + Labels: map[string]string{labelK: labelV}, }, Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{reqName: "1"}}, @@ -232,6 +209,86 @@ func TestReconcileState33(t *testing.T) { } +func TestReconcileDeleteUpdatesWorkload(t *testing.T) { + const ( + labelK = "app" + labelV = "things" + ) + resource := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "project:region:db") + resource.Generation = 1 + addFinalizers(resource) + addSelectorWorkload(resource, 
"Deployment", labelK, labelV) + + k, v := workload.PodAnnotation(resource) + + // mimic a deployment that was updated by the webhook + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "thing", + Namespace: "default", + Labels: map[string]string{labelK: labelV}, + }, + Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{k: v}}, + }}, + } + + // Build a client with the resource and deployment + cb, err := clientBuilder() + if err != nil { + t.Error(err) // shouldn't ever happen + } + c := cb.WithObjects(resource, deployment).Build() + r, req, ctx := reconciler(resource, c) + + // Delete the resource + c.Delete(ctx, resource) + if err != nil { + t.Error(err) + } + + // Run Reconcile on the deleted resource + res, err := r.Reconcile(ctx, req) + if err != nil { + t.Error(err) + } + if res.Requeue { + t.Errorf("got %v, want %v for requeue", res.Requeue, false) + } + + // Check that the resource doesn't exist anymore + err = c.Get(ctx, types.NamespacedName{ + Namespace: resource.GetNamespace(), + Name: resource.GetName(), + }, resource) + if err != nil { + if !errors.IsNotFound(err) { + t.Errorf("wants not found error, got %v", err) + } + } else { + t.Error("wants not found error, got no error") + } + + // Fetch the deployment and make sure the annotations show the + // deleted resource. 
+ d := &appsv1.Deployment{} + err = c.Get(ctx, types.NamespacedName{ + Namespace: deployment.GetNamespace(), + Name: deployment.GetName(), + }, d) + if err != nil { + t.Fatal(err) + } + + if got, want := d.Spec.Template.ObjectMeta.Annotations[k], "1-deleted-"; !strings.HasPrefix(got, want) { + t.Fatalf("got %v, wants annotation value to have prefix %v", got, want) + } + +} + func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client.Object, wantRequeue bool, wantStatus metav1.ConditionStatus, wantReason string) error { cb, err := clientBuilder() if err != nil { @@ -304,3 +361,21 @@ func reconciler(p *v1alpha1.AuthProxyWorkload, cb client.Client) (*AuthProxyWork } return r, req, ctx } + +func addFinalizers(p *v1alpha1.AuthProxyWorkload) { + p.Finalizers = []string{finalizerName} +} +func addPodWorkload(p *v1alpha1.AuthProxyWorkload) { + p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ + Kind: "Pod", + Name: "testpod", + } +} +func addSelectorWorkload(p *v1alpha1.AuthProxyWorkload, kind, labelK, labelV string) { + p.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ + Kind: kind, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{labelK: labelV}, + }, + } +} diff --git a/internal/testhelpers/resources.go b/internal/testhelpers/resources.go index 7b8687a8..d976f257 100644 --- a/internal/testhelpers/resources.go +++ b/internal/testhelpers/resources.go @@ -595,7 +595,7 @@ func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) * } // CreateAuthProxyWorkload creates an AuthProxyWorkload in the kubernetes cluster. 
-func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types.NamespacedName, appLabel string, connectionString string, kind string) error { +func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types.NamespacedName, appLabel string, connectionString string, kind string) (*v1alpha1.AuthProxyWorkload, error) { proxy := BuildAuthProxyWorkload(key, connectionString) proxy.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ Kind: kind, @@ -613,9 +613,9 @@ func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types } err := cc.Client.Create(ctx, proxy) if err != nil { - return fmt.Errorf("Unable to create entity %v", err) + return nil, fmt.Errorf("Unable to create entity %v", err) } - return nil + return proxy, nil } // GetConditionStatus finds a condition where Condition.Type == condType and returns diff --git a/internal/testhelpers/testcases.go b/internal/testhelpers/testcases.go index d27784c3..10317ade 100644 --- a/internal/testhelpers/testcases.go +++ b/internal/testhelpers/testcases.go @@ -53,7 +53,7 @@ func (cc *TestCaseClient) CreateResource(ctx context.Context) (*cloudsqlapi.Auth return nil, fmt.Errorf("can't create namespace, %v", err) } key := types.NamespacedName{Name: name, Namespace: ns} - err = cc.CreateAuthProxyWorkload(ctx, key, "app", expectedConnStr, "Deployment") + _, err = cc.CreateAuthProxyWorkload(ctx, key, "app", expectedConnStr, "Deployment") if err != nil { return nil, fmt.Errorf("unable to create auth proxy workload %v", err) } diff --git a/internal/testintegration/integration_test.go b/internal/testintegration/integration_test.go index 2c1a5c71..8181c927 100644 --- a/internal/testintegration/integration_test.go +++ b/internal/testintegration/integration_test.go @@ -85,7 +85,7 @@ func TestModifiesNewDeployment(t *testing.T) { key := types.NamespacedName{Name: pwlName, Namespace: tcc.Namespace} t.Log("Creating AuthProxyWorkload") - err = tcc.CreateAuthProxyWorkload(ctx, key, 
deploymentAppLabel, tcc.ConnectionString, "Deployment") + _, err = tcc.CreateAuthProxyWorkload(ctx, key, deploymentAppLabel, tcc.ConnectionString, "Deployment") if err != nil { t.Error(err) return @@ -158,7 +158,7 @@ func TestModifiesExistingDeployment(t *testing.T) { } t.Log("Creating cloud sql instance") - err = tcc.CreateAuthProxyWorkload(ctx, pKey, deploymentAppLabel, tcc.ConnectionString, "Deployment") + _, err = tcc.CreateAuthProxyWorkload(ctx, pKey, deploymentAppLabel, tcc.ConnectionString, "Deployment") if err != nil { t.Fatal(err) } diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index 2a175c64..4607ba44 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -18,6 +18,7 @@ import ( "fmt" "sort" "strings" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -47,12 +48,18 @@ const ( var l = logf.Log.WithName("internal.workload") -// PodAnnotation returns the key and value for an annotation on a pod -// to indicate the specific AuthProxyWorkload resource that was configured -// on the pod. +// PodAnnotation returns the annotation (key, value) that should be added to +// pods that are configured with this AuthProxyWorkload resource. This takes +// into account whether the AuthProxyWorkload exists or was recently deleted. func PodAnnotation(r *cloudsqlapi.AuthProxyWorkload) (string, string) { - return fmt.Sprintf("%s/%s", cloudsqlapi.AnnotationPrefix, r.Name), - fmt.Sprintf("%d", r.Generation) + k := fmt.Sprintf("%s/%s", cloudsqlapi.AnnotationPrefix, r.Name) + v := fmt.Sprintf("%d", r.Generation) + // if r was deleted, use a different value + if r.GetDeletionTimestamp() != nil { + v = fmt.Sprintf("%d-deleted-%s", r.Generation, r.GetDeletionTimestamp().Format(time.RFC3339)) + } + + return k, v } // Updater holds global state used while reconciling workloads. 
diff --git a/internal/workload/podspec_updates_test.go b/internal/workload/podspec_updates_test.go index 763fb811..37d0ac8c 100644 --- a/internal/workload/podspec_updates_test.go +++ b/internal/workload/podspec_updates_test.go @@ -19,6 +19,7 @@ import ( "reflect" "strconv" "testing" + "time" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/workload" @@ -777,7 +778,10 @@ func assertContainerArgsContains(t *testing.T, gotArgs, wantArgs []string) { } func TestPodTemplateAnnotations(t *testing.T) { + var ( + now = metav1.Now() + wantAnnotations = map[string]string{ "cloudsql.cloud.google.com/instance1": "1", "cloudsql.cloud.google.com/instance2": "2", @@ -794,9 +798,13 @@ func TestPodTemplateAnnotations(t *testing.T) { // Create a AuthProxyWorkload that matches the deployment csqls := []*v1alpha1.AuthProxyWorkload{ simpleAuthProxy("instance1", "project:server:db"), - simpleAuthProxy("instance2", "project:server2:db2")} + simpleAuthProxy("instance2", "project:server2:db2"), + simpleAuthProxy("instance3", "project:server3:db3")} + csqls[0].ObjectMeta.Generation = 1 csqls[1].ObjectMeta.Generation = 2 + csqls[2].ObjectMeta.Generation = 3 + csqls[2].ObjectMeta.DeletionTimestamp = &now // update the containers err := configureProxies(u, wl, csqls) @@ -810,3 +818,38 @@ func TestPodTemplateAnnotations(t *testing.T) { } } + +func TestPodAnnotation(t *testing.T) { + now := metav1.Now() + server := &v1alpha1.AuthProxyWorkload{ObjectMeta: metav1.ObjectMeta{Name: "instance1", Generation: 1}} + deletedServer := &v1alpha1.AuthProxyWorkload{ObjectMeta: metav1.ObjectMeta{Name: "instance2", Generation: 2, DeletionTimestamp: &now}} + + var testcases = []struct { + name string + r *v1alpha1.AuthProxyWorkload + wantK string + wantV string + }{ + { + name: "instance1", + r: server, + wantK: "cloudsql.cloud.google.com/instance1", + wantV: "1", + }, { + name: "instance2", + r: deletedServer, + 
wantK: "cloudsql.cloud.google.com/instance2", + wantV: fmt.Sprintf("2-deleted-%s", now.Format(time.RFC3339)), + }, + } + + for _, tc := range testcases { + gotK, gotV := workload.PodAnnotation(tc.r) + if tc.wantK != gotK { + t.Errorf("got %v, want %v for key", gotK, tc.wantK) + } + if tc.wantV != gotV { + t.Errorf("got %v, want %v for value", gotV, tc.wantV) + } + } +} diff --git a/tests/e2e_test.go b/tests/e2e_test.go index aa7eacd9..57f0d54a 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -128,7 +128,7 @@ func TestProxyAppliedOnNewWorkload(t *testing.T) { key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} t.Log("Creating AuthProxyWorkload") - err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + _, err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) if err != nil { t.Fatal(err) } @@ -243,7 +243,7 @@ func TestProxyAppliedOnExistingWorkload(t *testing.T) { } t.Log("Creating AuthProxyWorkload") - err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + _, err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) if err != nil { t.Fatal(err) } @@ -335,7 +335,7 @@ func TestPublicDBConnections(t *testing.T) { wl.Deployment.Spec.Template = test.podTemplate t.Log("Creating AuthProxyWorkload") - err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + _, err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) if err != nil { t.Fatal(err) } @@ -373,3 +373,88 @@ func TestPublicDBConnections(t *testing.T) { } } + +func TestUpdateWorkloadOnDelete(t *testing.T) { + + const ( + pwlName = "newss" + appLabel = "busybox" + name = "app" + allOrAny = "all" + ) + // Use a deployment workload + wl := &workload.DeploymentWorkload{Deployment: testhelpers.BuildDeployment(types.NamespacedName{}, "busybox")} + o := wl.Object() + kind := o.GetObjectKind().GroupVersionKind().Kind + + // Set up the e2e test namespace 
+ skipCleanup := loadValue("SKIP_CLEANUP", "", "false") == "true" + ctx := testContext() + tp := newPublicPostgresClient("new" + strings.ToLower(kind)) + + err := tp.CreateOrPatchNamespace(ctx) + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + if skipCleanup { + return + } + + err = tp.DeleteNamespace(ctx) + if err != nil { + t.Fatal(err) + } + }) + + // Create AuthProxyWorkload + key := types.NamespacedName{Name: pwlName, Namespace: tp.Namespace} + + t.Log("Creating AuthProxyWorkload") + proxy, err := tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + if err != nil { + t.Fatal(err) + } + + t.Log("Waiting for AuthProxyWorkload operator to begin the reconcile loop") + _, err = tp.GetAuthProxyWorkloadAfterReconcile(ctx, key) + if err != nil { + t.Fatal("unable to create AuthProxyWorkload", err) + } + + // Create deployment + t.Log("Creating ", kind) + o.SetNamespace(tp.Namespace) + o.SetName(name) + err = tp.CreateWorkload(ctx, o) + if err != nil { + t.Fatal("unable to create ", kind, err) + } + selector := &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": appLabel}, + } + + // Check that the deployment pods are configured with the proxy: pods + // have 2 containers. + t.Log("Checking for container counts", kind) + err = tp.ExpectPodContainerCount(ctx, selector, 2, allOrAny) + if err != nil { + t.Error(err) + } + t.Log("Workload Created. Removing AuthProxyWorkload", kind) + + // Delete the AuthProxyWorkload + err = tp.Client.Delete(ctx, proxy) + if err != nil { + t.Fatal(err) + } + + // Check that deployment pods are configured without the proxy: pods have + // 1 container. 
+ t.Log("Checking for container counts after delete", kind) + err = tp.ExpectPodContainerCount(ctx, selector, 1, allOrAny) + if err != nil { + t.Error(err) + } +} From 090b88da2f3cbc00ca98bee7cdfbb4e50a6c4cb9 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Wed, 8 Feb 2023 15:46:55 -0700 Subject: [PATCH 18/29] feat: add new field RolloutStrategy control automatic rollout (#202) Adds a new field RolloutStrategy to the AuthProxyWorkload that will allow users to control how the operator will roll out changes when an AuthProxyWorkload is updated. RolloutStrategy has two possible values: - "Workload" in which the operator will automatically follow the behavior of the Strategy set on the workload. - "None" in which the operator will not attempt to roll out changes. Related to #187 --- ...l.cloud.google.com_authproxyworkloads.yaml | 7 ++ installer/cloud-sql-proxy-operator.yaml | 7 ++ .../api/v1alpha1/authproxyworkload_types.go | 25 +++++ .../api/v1alpha1/authproxyworkload_webhook.go | 5 +- .../authproxyworkload_controller.go | 15 +++ .../authproxyworkload_controller_test.go | 97 ++++++++++++++++--- internal/workload/podspec_updates.go | 2 +- 7 files changed, 143 insertions(+), 15 deletions(-) diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index 0f1720df..e373e393 100644 --- a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -862,6 +862,13 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + rolloutStrategy: + default: Workload + description: 'RolloutStrategy indicates the strategy to use when rolling out changes to the workloads affected by the results. When this is set to `Workload`, changes to this resource will be automatically applied to a running Deployment, StatefulSet, DaemonSet, or ReplicaSet in accordance with the Strategy set on that workload. When this is set to `None`, the operator will take no action to roll out changes to affected workloads. `Workload` will be used by default if no value is set. See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy' + enum: + - Workload + - None + type: string sqlAdminAPIEndpoint: description: SQLAdminAPIEndpoint is a debugging parameter that when specified will change the Google Cloud api endpoint used by the proxy. type: string diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index c58e32e7..81b2d02a 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -880,6 +880,13 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + rolloutStrategy: + default: Workload + description: 'RolloutStrategy indicates the strategy to use when rolling out changes to the workloads affected by the results. When this is set to `Workload`, changes to this resource will be automatically applied to a running Deployment, StatefulSet, DaemonSet, or ReplicaSet in accordance with the Strategy set on that workload. 
When this is set to `None`, the operator will take no action to roll out changes to affected workloads. `Workload` will be used by default if no value is set. See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy' + enum: + - Workload + - None + type: string sqlAdminAPIEndpoint: description: SQLAdminAPIEndpoint is a debugging parameter that when specified will change the Google Cloud api endpoint used by the proxy. type: string diff --git a/internal/api/v1alpha1/authproxyworkload_types.go b/internal/api/v1alpha1/authproxyworkload_types.go index 04c73482..659ca743 100644 --- a/internal/api/v1alpha1/authproxyworkload_types.go +++ b/internal/api/v1alpha1/authproxyworkload_types.go @@ -62,6 +62,18 @@ const ( // ReasonUpToDate relates to condition WorkloadUpToDate, this reason is set // when there are no workloads related to this AuthProxyWorkload resource. ReasonUpToDate = "UpToDate" + + // WorkloadStrategy is the RolloutStrategy value that indicates that + // when the AuthProxyWorkload is updated or deleted, the changes should be + // applied to affected workloads (Deployments, StatefulSets, etc.) following + // the Strategy defined by that workload. + // See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + WorkloadStrategy = "Workload" + + // NoneStrategy is the RolloutStrategy value that indicates that the. + // when the AuthProxyWorkload is updated or deleted, no action should be taken + // by the operator to update the affected workloads. + NoneStrategy = "None" ) // AuthProxyWorkloadSpec defines the desired state of AuthProxyWorkload @@ -144,6 +156,19 @@ type AuthProxyContainerSpec struct { // will use the latest known compatible proxy image. //+kubebuilder:validation:Optional Image string `json:"image,omitempty"` + + // RolloutStrategy indicates the strategy to use when rolling out changes to + // the workloads affected by the results. 
When this is set to + // `Workload`, changes to this resource will be automatically applied + // to a running Deployment, StatefulSet, DaemonSet, or ReplicaSet in + // accordance with the Strategy set on that workload. When this is set to + // `None`, the operator will take no action to roll out changes to affected + // workloads. `Workload` will be used by default if no value is set. + // See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Enum=Workload;None + //+kubebuilder:default=Workload + RolloutStrategy string `json:"rolloutStrategy,omitempty"` } // InstanceSpec describes the configuration for how the proxy should expose diff --git a/internal/api/v1alpha1/authproxyworkload_webhook.go b/internal/api/v1alpha1/authproxyworkload_webhook.go index 65f09133..d0880903 100644 --- a/internal/api/v1alpha1/authproxyworkload_webhook.go +++ b/internal/api/v1alpha1/authproxyworkload_webhook.go @@ -39,7 +39,10 @@ var _ webhook.Defaulter = &AuthProxyWorkload{} // Default implements webhook.Defaulter so a webhook will be registered for the type func (r *AuthProxyWorkload) Default() { authproxyworkloadlog.Info("default", "name", r.Name) - // TODO(user): fill in your defaulting logic. + if r.Spec.AuthProxyContainer != nil && + r.Spec.AuthProxyContainer.RolloutStrategy == "" { + r.Spec.AuthProxyContainer.RolloutStrategy = WorkloadStrategy + } } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
diff --git a/internal/controller/authproxyworkload_controller.go b/internal/controller/authproxyworkload_controller.go index e808bfce..d5961e89 100644 --- a/internal/controller/authproxyworkload_controller.go +++ b/internal/controller/authproxyworkload_controller.go @@ -276,6 +276,10 @@ func (r *AuthProxyWorkloadReconciler) needsAnnotationUpdate(wl workload.Workload return false } + if isRolloutStrategyNone(resource) { + return false + } + k, v := workload.PodAnnotation(resource) // Check if the correct annotation exists an := wl.PodTemplateAnnotations() @@ -295,6 +299,11 @@ func (r *AuthProxyWorkloadReconciler) updateAnnotation(wl workload.Workload, res return } + // The user has set "None" as the rollout strategy. Ignore it. + if isRolloutStrategyNone(resource) { + return + } + k, v := workload.PodAnnotation(resource) // add the annotation if needed... @@ -307,6 +316,12 @@ func (r *AuthProxyWorkloadReconciler) updateAnnotation(wl workload.Workload, res mpt.SetPodTemplateAnnotations(an) } +// isRolloutStrategyNone returns true when user has set "None" as the rollout strategy. +func isRolloutStrategyNone(resource *cloudsqlapi.AuthProxyWorkload) bool { + return resource.Spec.AuthProxyContainer != nil && + resource.Spec.AuthProxyContainer.RolloutStrategy == cloudsqlapi.NoneStrategy +} + // workloadsReconciled State 3.1: If workloads are all up to date, mark the condition // "UpToDate" true and do not requeue. 
func (r *AuthProxyWorkloadReconciler) reconcileResult(ctx context.Context, l logr.Logger, resource, orig *cloudsqlapi.AuthProxyWorkload, reason, message string, upToDate bool) (ctrl.Result, error) { diff --git a/internal/controller/authproxyworkload_controller_test.go b/internal/controller/authproxyworkload_controller_test.go index b3c2a319..c1ccac14 100644 --- a/internal/controller/authproxyworkload_controller_test.go +++ b/internal/controller/authproxyworkload_controller_test.go @@ -58,7 +58,7 @@ func TestReconcileState11(t *testing.T) { Name: "test", }, "project:region:db") - err := runReconcileTestcase(p, []client.Object{p}, true, "", "") + _, _, err := runReconcileTestcase(p, []client.Object{p}, true, "", "") if err != nil { t.Fatal(err) } @@ -115,7 +115,7 @@ func TestReconcileState21ByName(t *testing.T) { addFinalizers(p) addPodWorkload(p) - err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) + _, _, err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) if err != nil { t.Fatal(err) } @@ -129,7 +129,7 @@ func TestReconcileState21BySelector(t *testing.T) { addFinalizers(p) addSelectorWorkload(p, "Pod", "app", "things") - err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) + _, _, err := runReconcileTestcase(p, []client.Object{p}, false, metav1.ConditionTrue, v1alpha1.ReasonNoWorkloadsFound) if err != nil { t.Fatal(err) } @@ -165,11 +165,82 @@ func TestReconcileState32(t *testing.T) { }}, } - err := runReconcileTestcase(p, []client.Object{p, pod}, wantRequeue, wantStatus, wantReason) + c, ctx, err := runReconcileTestcase(p, []client.Object{p, pod}, wantRequeue, wantStatus, wantReason) if err != nil { t.Fatal(err) } + // Fetch the deployment and make sure the annotations show the + // deleted resource. 
+ d := &appsv1.Deployment{} + err = c.Get(ctx, types.NamespacedName{ + Namespace: pod.GetNamespace(), + Name: pod.GetName(), + }, d) + if err != nil { + t.Fatal(err) + } + + if got, want := d.Spec.Template.ObjectMeta.Annotations[reqName], "2"; !strings.HasPrefix(got, "2") { + t.Fatalf("got %v, wants annotation value to have prefix %v", got, want) + } + +} + +func TestReconcileState32RolloutStrategyNone(t *testing.T) { + const ( + wantRequeue = false + wantStatus = metav1.ConditionTrue + wantReason = v1alpha1.ReasonFinishedReconcile + labelK = "app" + labelV = "things" + ) + + p := testhelpers.BuildAuthProxyWorkload(types.NamespacedName{ + Namespace: "default", + Name: "test", + }, "project:region:db") + p.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{ + RolloutStrategy: v1alpha1.NoneStrategy, + } + p.Generation = 2 + addFinalizers(p) + addSelectorWorkload(p, "Deployment", labelK, labelV) + + // mimic a deployment that was updated by the webhook + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "thing", + Namespace: "default", + Labels: map[string]string{labelK: labelV}, + }, + Spec: appsv1.DeploymentSpec{Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{ + "annotation": "set", + }}, + }}, + } + + c, ctx, err := runReconcileTestcase(p, []client.Object{p, deployment}, wantRequeue, wantStatus, wantReason) + if err != nil { + t.Fatal(err) + } + + // Fetch the deployment and make sure the annotations show the + // deleted resource. 
+ d := &appsv1.Deployment{} + err = c.Get(ctx, types.NamespacedName{ + Namespace: deployment.GetNamespace(), + Name: deployment.GetName(), + }, d) + if err != nil { + t.Fatal(err) + } + + if got, want := len(d.Spec.Template.ObjectMeta.Annotations), 1; got != want { + t.Fatalf("got %v annotations, wants %v annotations", got, want) + } + } func TestReconcileState33(t *testing.T) { @@ -202,7 +273,7 @@ func TestReconcileState33(t *testing.T) { }}, } - err := runReconcileTestcase(p, []client.Object{p, pod}, wantRequeue, wantStatus, wantReason) + _, _, err := runReconcileTestcase(p, []client.Object{p, pod}, wantRequeue, wantStatus, wantReason) if err != nil { t.Fatal(err) } @@ -289,10 +360,10 @@ func TestReconcileDeleteUpdatesWorkload(t *testing.T) { } -func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client.Object, wantRequeue bool, wantStatus metav1.ConditionStatus, wantReason string) error { +func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client.Object, wantRequeue bool, wantStatus metav1.ConditionStatus, wantReason string) (client.WithWatch, context.Context, error) { cb, err := clientBuilder() if err != nil { - return err // shouldn't ever happen + return nil, nil, err // shouldn't ever happen } c := cb.WithObjects(clientObjects...).Build() @@ -300,10 +371,10 @@ func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client. r, req, ctx := reconciler(p, c) res, err := r.Reconcile(ctx, req) if err != nil { - return err + return nil, nil, err } if res.Requeue != wantRequeue { - return fmt.Errorf("got %v, want %v for requeue", res.Requeue, wantRequeue) + return nil, nil, fmt.Errorf("got %v, want %v for requeue", res.Requeue, wantRequeue) } for _, o := range clientObjects { @@ -316,17 +387,17 @@ func runReconcileTestcase(p *v1alpha1.AuthProxyWorkload, clientObjects []client. 
if wantStatus != "" || wantReason != "" { cond := findCondition(p.Status.Conditions, v1alpha1.ConditionUpToDate) if cond == nil { - return fmt.Errorf("the UpToDate condition was nil, wants condition to exist") + return nil, nil, fmt.Errorf("the UpToDate condition was nil, wants condition to exist") } if wantStatus != "" && cond.Status != wantStatus { - return fmt.Errorf("got %v, want %v for UpToDate condition status", cond.Status, wantStatus) + return nil, nil, fmt.Errorf("got %v, want %v for UpToDate condition status", cond.Status, wantStatus) } if wantReason != "" && cond.Reason != wantReason { - return fmt.Errorf("got %v, want %v for UpToDate condition reason", cond.Reason, wantReason) + return nil, nil, fmt.Errorf("got %v, want %v for UpToDate condition reason", cond.Reason, wantReason) } } - return nil + return c, ctx, nil } func clientBuilder() (*fake.ClientBuilder, error) { diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index 4607ba44..c815a441 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -55,7 +55,7 @@ func PodAnnotation(r *cloudsqlapi.AuthProxyWorkload) (string, string) { k := fmt.Sprintf("%s/%s", cloudsqlapi.AnnotationPrefix, r.Name) v := fmt.Sprintf("%d", r.Generation) // if r was deleted, use a different value - if r.GetDeletionTimestamp() != nil { + if !r.GetDeletionTimestamp().IsZero() { v = fmt.Sprintf("%d-deleted-%s", r.Generation, r.GetDeletionTimestamp().Format(time.RFC3339)) } From d4fe6d23b431bc8d6d6a200844ad1cf63d21a6c4 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Thu, 9 Feb 2023 13:24:33 -0700 Subject: [PATCH 19/29] chore: upgrade to latest controller-runtime and kubernetes apis (#203) Upgrades kubernetes and controller-runtime APIs: kubernetes 0.24.2 --> 0.26.1 controller-runtime 0.12.2 --> 0.14.4 --- ...l.cloud.google.com_authproxyworkloads.yaml | 36 +- go.mod | 68 ++- go.sum | 482 
+++--------------- installer/cloud-sql-proxy-operator.yaml | 36 +- 4 files changed, 161 insertions(+), 461 deletions(-) diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index e373e393..9f01f75c 100644 --- a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -421,7 +421,7 @@ spec: description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. type: string ports: - description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: @@ -556,6 +556,21 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. 
\n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -843,6 +858,21 @@ spec: resources: description: Resources specifies the resources required for the proxy pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -959,7 +989,7 @@ spec: conditions: description: "Conditions show the status of the AuthProxyWorkload resource on this matching workload. \n The \"UpToDate\" condition indicates that the proxy was successfully applied to all matching workloads. See ConditionUpToDate." items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. @@ -1024,7 +1054,7 @@ spec: conditions: description: "Conditions show the overall status of the AuthProxyWorkload resource on all matching workloads. \n The \"UpToDate\" condition indicates that the proxy was successfully applied to all matching workloads. See ConditionUpToDate." items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
diff --git a/go.mod b/go.mod index 2e05500d..8412d381 100644 --- a/go.mod +++ b/go.mod @@ -5,75 +5,65 @@ go 1.20 require ( github.com/go-logr/logr v1.2.3 go.uber.org/zap v1.24.0 - k8s.io/api v0.24.2 - k8s.io/apimachinery v0.24.2 - k8s.io/client-go v0.24.2 - sigs.k8s.io/controller-runtime v0.12.2 + k8s.io/api v0.26.1 + k8s.io/apimachinery v0.26.1 + k8s.io/client-go v0.26.1 + sigs.k8s.io/controller-runtime v0.14.4 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go v0.81.0 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.18 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/go-logr/zapr v1.2.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // 
indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.5 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.1.2 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect + golang.org/x/sys v0.3.0 // 
indirect + golang.org/x/term v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect + golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.2 // indirect - k8s.io/component-base v0.24.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/apiextensions-apiserver v0.26.1 // indirect + k8s.io/component-base v0.26.1 // indirect + k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 475c57eb..830526d2 100644 --- a/go.sum +++ b/go.sum @@ -13,12 +13,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go 
v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -27,7 +21,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -38,56 +31,20 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= 
-github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod 
h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -96,92 +53,54 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod 
h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod 
h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.0 
h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -194,7 +113,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock 
v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -210,14 +128,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -227,17 +141,15 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -245,54 +157,17 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod 
h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -303,16 +178,12 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck 
v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -320,28 +191,13 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 
h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= +github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -349,95 +205,52 @@ github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= +github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase 
v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -448,72 +261,30 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod 
h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= 
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -536,8 +307,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= 
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -546,17 +315,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -568,7 +329,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -579,40 +339,24 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net 
v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= +golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -623,13 +367,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod 
h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -640,11 +379,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -659,77 +394,55 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 
h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -749,7 +462,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -757,20 +469,10 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod 
h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= @@ -790,11 +492,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -824,31 +521,15 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -861,16 +542,6 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -883,8 +554,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -892,19 +563,10 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -916,8 +578,6 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod 
h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -925,40 +585,30 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= -k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= -k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.2 
h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= +k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= +k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= +k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= +k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= +k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= +k8s.io/klog/v2 v2.80.1 
h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= -sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/controller-runtime v0.14.4 h1:Kd/Qgx5pd2XUL08eOV2vwIq3L9GhIbJ5Nxengbd4/0M= +sigs.k8s.io/controller-runtime v0.14.4/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json 
v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 81b2d02a..fadf2fb4 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -439,7 +439,7 @@ spec: description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. type: string ports: - description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: @@ -574,6 +574,21 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -861,6 +876,21 @@ spec: resources: description: Resources specifies the resources required for the proxy pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -977,7 +1007,7 @@ spec: conditions: description: "Conditions show the status of the AuthProxyWorkload resource on this matching workload. \n The \"UpToDate\" condition indicates that the proxy was successfully applied to all matching workloads. See ConditionUpToDate." 
items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. @@ -1042,7 +1072,7 @@ spec: conditions: description: "Conditions show the overall status of the AuthProxyWorkload resource on all matching workloads. \n The \"UpToDate\" condition indicates that the proxy was successfully applied to all matching workloads. See ConditionUpToDate." items: - description: "Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. From 8d4a206100108f2d080e4d1a0f8b3ba63f78d68e Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Fri, 17 Feb 2023 09:36:08 -0700 Subject: [PATCH 20/29] chore: use environment vars to configure global proxy settings (#206) Use environment variables instead of CLI flags to configure the global proxy settings for proxy containers. 
Related to #45 --- internal/workload/podspec_updates.go | 185 ++++++++++++++-------- internal/workload/podspec_updates_test.go | 45 +++--- 2 files changed, 135 insertions(+), 95 deletions(-) diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index c815a441..30fc942f 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -204,38 +204,30 @@ func (u *Updater) ConfigureWorkload(wl *PodWorkload, matches []*cloudsqlapi.Auth } type managedEnvVar struct { - Instance dbInstance `json:"dbInstance"` - OperatorManagedValue corev1.EnvVar `json:"operatorManagedValue"` - OriginalValues map[string]string `json:"originalValues,omitempty"` + Instance proxyInstanceID `json:"proxyInstanceID"` + ContainerName string `json:"containerName"` + OperatorManagedValue corev1.EnvVar `json:"operatorManagedValue"` } type managedPort struct { - Instance dbInstance `json:"dbInstance"` - OriginalValues map[string]int32 `json:"originalValues,omitempty"` - Port int32 `json:"port,omitempty"` + Instance proxyInstanceID `json:"proxyInstanceID"` + Port int32 `json:"port,omitempty"` } type managedVolume struct { Volume corev1.Volume `json:"volume"` VolumeMount corev1.VolumeMount `json:"volumeMount"` - Instance dbInstance `json:"dbInstance"` + Instance proxyInstanceID `json:"proxyInstanceID"` } -type dbInstance struct { +// proxyInstanceID is an identifier for a proxy and/or specific proxy database +// instance that created the EnvVar or Port. 
When this is empty, means that the +// EnvVar or Port was created by the user, and is not associated with a proxy +type proxyInstanceID struct { AuthProxyWorkload types.NamespacedName `json:"authProxyWorkload"` ConnectionString string `json:"connectionString"` } -func dbInst(namespace, name, connectionString string) dbInstance { - return dbInstance{ - AuthProxyWorkload: types.NamespacedName{ - Namespace: namespace, - Name: name, - }, - ConnectionString: connectionString, - } -} - // updateState holds internal state while a particular workload being configured // with one or more DBInstances. type updateState struct { @@ -248,14 +240,22 @@ type updateState struct { // workloadMods holds all modifications to this workload done by the operator so // so that it can be undone later. type workloadMods struct { - DBInstances []*dbInstance `json:"dbInstances"` - EnvVars []*managedEnvVar `json:"envVars"` - VolumeMounts []*managedVolume `json:"volumeMounts"` - Ports []*managedPort `json:"ports"` + DBInstances []*proxyInstanceID `json:"dbInstances"` + EnvVars []*managedEnvVar `json:"envVars"` + VolumeMounts []*managedVolume `json:"volumeMounts"` + Ports []*managedPort `json:"ports"` } -func (s *updateState) addInUsePort(p int32, containerName string) { - s.addPort(p, containerName, types.NamespacedName{}, "") +func (s *updateState) addWorkloadPort(p int32) { + // This port is associated with the workload, not the proxy. + // so this uses an empty proxyInstanceID{} + s.addPort(p, proxyInstanceID{}) +} + +func (s *updateState) addProxyPort(p int32) { + // This port is associated with the workload, not the proxy. + // so this uses an empty proxyInstanceID{} + s.addPort(p, proxyInstanceID{}) } // isPortInUse checks if the port is in use. 
@@ -314,12 +314,18 @@ func (s *updateState) useInstancePort(p *cloudsqlapi.AuthProxyWorkload, is *clou port, is.ConnectionString), p) } - s.addPort(port, "", n, is.ConnectionString) + s.addPort(port, proxyInstanceID{ + AuthProxyWorkload: types.NamespacedName{ + Name: p.Name, + Namespace: p.Namespace, + }, + ConnectionString: is.ConnectionString, + }) return port } -func (s *updateState) addPort(p int32, containerName string, n types.NamespacedName, connectionString string) { +func (s *updateState) addPort(p int32, instance proxyInstanceID) { var mp *managedPort for i := 0; i < len(s.mods.Ports); i++ { @@ -330,46 +336,78 @@ func (s *updateState) addPort(p int32, containerName string, n types.NamespacedN if mp == nil { mp = &managedPort{ - Instance: dbInst(n.Namespace, n.Name, connectionString), - Port: p, - OriginalValues: map[string]int32{}, + Instance: instance, + Port: p, } s.mods.Ports = append(s.mods.Ports, mp) } - if containerName != "" && !strings.HasPrefix(containerName, ContainerPrefix) { - mp.OriginalValues[containerName] = p - } +} +func (s *updateState) addProxyContainerEnvVar(p *cloudsqlapi.AuthProxyWorkload, k, v string) { + s.addEnvVar(p, managedEnvVar{ + Instance: proxyInstanceID{ + AuthProxyWorkload: types.NamespacedName{ + Namespace: p.Namespace, + Name: p.Name, + }, + }, + ContainerName: ContainerName(p), + OperatorManagedValue: corev1.EnvVar{Name: k, Value: v}, + }) } // addWorkloadEnvVar adds or replaces the envVar based on its Name, returning the old and new values func (s *updateState) addWorkloadEnvVar(p *cloudsqlapi.AuthProxyWorkload, is *cloudsqlapi.InstanceSpec, ev corev1.EnvVar) { + s.addEnvVar(p, managedEnvVar{ + Instance: proxyInstanceID{ + AuthProxyWorkload: types.NamespacedName{ + Namespace: p.Namespace, + Name: p.Name, + }, + ConnectionString: is.ConnectionString, + }, + OperatorManagedValue: ev, + }) +} +func (s *updateState) addEnvVar(p *cloudsqlapi.AuthProxyWorkload, v managedEnvVar) { for i := 0; i < len(s.mods.EnvVars); i++ { - 
if s.mods.EnvVars[i].OperatorManagedValue.Name == ev.Name { - old := s.mods.EnvVars[i].OperatorManagedValue - s.mods.EnvVars[i].OperatorManagedValue = ev - if old.Value != ev.Value { - s.addError(cloudsqlapi.ErrorCodeEnvConflict, - fmt.Sprintf("environment variable named %s already exists", ev.Name), p) - } + oldEnv := s.mods.EnvVars[i] + // if the values don't match and either one is global, or its set twice + if isEnvVarConflict(oldEnv, v) { + s.addError(cloudsqlapi.ErrorCodeEnvConflict, + fmt.Sprintf("environment variable named %s is set more than once", + oldEnv.OperatorManagedValue.Name), + p) return } } - s.mods.EnvVars = append(s.mods.EnvVars, &managedEnvVar{ - Instance: dbInst(p.Namespace, p.Name, is.ConnectionString), - OriginalValues: map[string]string{}, - OperatorManagedValue: ev, - }) + + s.mods.EnvVars = append(s.mods.EnvVars, &v) +} + +func isEnvVarConflict(oldEnv *managedEnvVar, v managedEnvVar) bool { + // it's a different name, no conflict + if oldEnv.OperatorManagedValue.Name != v.OperatorManagedValue.Name { + return false + } + + // if the envvar is intended for a different container + if oldEnv.ContainerName != v.ContainerName && oldEnv.ContainerName != "" && v.ContainerName != "" { + return false + } + + // different value, therefore conflict + return oldEnv.OperatorManagedValue.Value != v.OperatorManagedValue.Value } func (s *updateState) initState(pl []*cloudsqlapi.AuthProxyWorkload) { // Reset the mods.DBInstances to the list of pl being // applied right now. 
- s.mods.DBInstances = make([]*dbInstance, 0, len(pl)) + s.mods.DBInstances = make([]*proxyInstanceID, 0, len(pl)) for _, wl := range pl { for _, instance := range wl.Spec.Instances { s.mods.DBInstances = append(s.mods.DBInstances, - &dbInstance{ + &proxyInstanceID{ AuthProxyWorkload: types.NamespacedName{ Namespace: wl.Namespace, Name: wl.Name, @@ -399,7 +437,7 @@ func (s *updateState) update(wl *PodWorkload, matches []*cloudsqlapi.AuthProxyWo for i := 0; i < len(nonAuthProxyContainers); i++ { c := nonAuthProxyContainers[i] for j := 0; j < len(c.Ports); j++ { - s.addInUsePort(c.Ports[j].ContainerPort, c.Name) + s.addWorkloadPort(c.Ports[j].ContainerPort) } } @@ -455,19 +493,22 @@ func (s *updateState) updateContainer(p *cloudsqlapi.AuthProxyWorkload, wl Workl return } - // Build the c - var cliArgs []string - // always enable http port healthchecks on 0.0.0.0 and structured logs - cliArgs = s.addHealthCheck(p, c, cliArgs) + s.addHealthCheck(p, c) // add the user agent - cliArgs = append(cliArgs, fmt.Sprintf("--user-agent=%v", s.updater.userAgent)) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_USER_AGENT", s.updater.userAgent) + + // configure structured logs + s.addProxyContainerEnvVar(p, "CSQL_PROXY_STRUCTURED_LOGS", "true") c.Name = ContainerName(p) c.ImagePullPolicy = "IfNotPresent" - cliArgs = s.applyContainerSpec(p, c, cliArgs) + s.applyContainerSpec(p, c) + + // Build the c + var cliArgs []string // Instances for i := range p.Spec.Instances { @@ -527,12 +568,12 @@ func (s *updateState) updateContainer(p *cloudsqlapi.AuthProxyWorkload, wl Workl // applyContainerSpec applies settings from cloudsqlapi.AuthProxyContainerSpec // to the container -func (s *updateState) applyContainerSpec(p *cloudsqlapi.AuthProxyWorkload, c *corev1.Container, cliArgs []string) []string { +func (s *updateState) applyContainerSpec(p *cloudsqlapi.AuthProxyWorkload, c *corev1.Container) { c.Image = s.defaultProxyImage() c.Resources = defaultContainerResources if p.Spec.AuthProxyContainer 
== nil { - return cliArgs + return } if p.Spec.AuthProxyContainer.Image != "" { @@ -544,25 +585,32 @@ func (s *updateState) applyContainerSpec(p *cloudsqlapi.AuthProxyWorkload, c *co } if p.Spec.AuthProxyContainer.SQLAdminAPIEndpoint != "" { - cliArgs = append(cliArgs, "--sqladmin-api-endpoint="+p.Spec.AuthProxyContainer.SQLAdminAPIEndpoint) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_SQLADMIN_API_ENDPOINT", p.Spec.AuthProxyContainer.SQLAdminAPIEndpoint) } if p.Spec.AuthProxyContainer.MaxConnections != nil && *p.Spec.AuthProxyContainer.MaxConnections != 0 { - cliArgs = append(cliArgs, fmt.Sprintf("--max-connections=%d", *p.Spec.AuthProxyContainer.MaxConnections)) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_MAX_CONNECTIONS", fmt.Sprintf("%d", *p.Spec.AuthProxyContainer.MaxConnections)) } if p.Spec.AuthProxyContainer.MaxSigtermDelay != nil && *p.Spec.AuthProxyContainer.MaxSigtermDelay != 0 { - cliArgs = append(cliArgs, fmt.Sprintf("--max-sigterm-delay=%d", *p.Spec.AuthProxyContainer.MaxSigtermDelay)) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_MAX_SIGTERM_DELAY", fmt.Sprintf("%d", *p.Spec.AuthProxyContainer.MaxSigtermDelay)) } - return cliArgs + return } // updateContainerEnv applies global container state to all containers func (s *updateState) updateContainerEnv(c *corev1.Container) { for i := 0; i < len(s.mods.EnvVars); i++ { var found bool - operatorEnv := s.mods.EnvVars[i].OperatorManagedValue + v := s.mods.EnvVars[i] + operatorEnv := v.OperatorManagedValue + + // If this EnvVar is not for this container and not for all containers + // don't add it to this container. + if v.ContainerName != c.Name && v.ContainerName != "" { + continue + } for j := 0; j < len(c.Env); j++ { if operatorEnv.Name == c.Env[j].Name { @@ -578,11 +626,8 @@ func (s *updateState) updateContainerEnv(c *corev1.Container) { } // addHealthCheck adds the health check declaration to this workload. 
-func (s *updateState) addHealthCheck(_ *cloudsqlapi.AuthProxyWorkload, c *corev1.Container, cliArgs []string) []string { +func (s *updateState) addHealthCheck(p *cloudsqlapi.AuthProxyWorkload, c *corev1.Container) { port := DefaultHealthCheckPort - for s.isPortInUse(port) { - port++ - } c.StartupProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{ @@ -605,12 +650,12 @@ func (s *updateState) addHealthCheck(_ *cloudsqlapi.AuthProxyWorkload, c *corev1 }}, PeriodSeconds: 30, } - cliArgs = append(cliArgs, - fmt.Sprintf("--http-port=%d", port), - "--http-address=0.0.0.0", - "--health-check", - "--structured-logs") - return cliArgs + // Add a port that is associated with the proxy, but not a specific db instance + s.addPort(port, proxyInstanceID{AuthProxyWorkload: types.NamespacedName{Namespace: p.Namespace, Name: p.Name}}) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_HTTP_PORT", fmt.Sprintf("%d", port)) + s.addProxyContainerEnvVar(p, "CSQL_PROXY_HTTP_ADDRESS", "0.0.0.0") + s.addProxyContainerEnvVar(p, "CSQL_PROXY_HEALTH_CHECK", "true") + return } func (s *updateState) addError(errorCode, description string, p *cloudsqlapi.AuthProxyWorkload) { diff --git a/internal/workload/podspec_updates_test.go b/internal/workload/podspec_updates_test.go index 37d0ac8c..a4b7dd06 100644 --- a/internal/workload/podspec_updates_test.go +++ b/internal/workload/podspec_updates_test.go @@ -517,6 +517,7 @@ func TestProxyCLIArgs(t *testing.T) { proxySpec v1alpha1.AuthProxyWorkloadSpec wantProxyArgContains []string wantErrorCodes []string + wantWorkloadEnv map[string]string } wantTrue := true wantFalse := false @@ -533,12 +534,12 @@ func TestProxyCLIArgs(t *testing.T) { PortEnvName: "DB_PORT", }}, }, - wantProxyArgContains: []string{ - "--structured-logs", - "--health-check", - fmt.Sprintf("--http-port=%d", workload.DefaultHealthCheckPort), - "--http-address=0.0.0.0", - "--user-agent=cloud-sql-proxy-operator/dev", + wantWorkloadEnv: map[string]string{ + 
"CSQL_PROXY_STRUCTURED_LOGS": "true", + "CSQL_PROXY_HEALTH_CHECK": "true", + "CSQL_PROXY_HTTP_PORT": fmt.Sprintf("%d", workload.DefaultHealthCheckPort), + "CSQL_PROXY_HTTP_ADDRESS": "0.0.0.0", + "CSQL_PROXY_USER_AGENT": "cloud-sql-proxy-operator/dev", }, }, { @@ -621,25 +622,6 @@ func TestProxyCLIArgs(t *testing.T) { fmt.Sprintf("hello:world:one?port=%d&private-ip=true", workload.DefaultFirstPort), fmt.Sprintf("hello:world:two?port=%d&private-ip=false", workload.DefaultFirstPort+1)}, }, - { - desc: "global flags", - proxySpec: v1alpha1.AuthProxyWorkloadSpec{ - AuthProxyContainer: &v1alpha1.AuthProxyContainerSpec{ - SQLAdminAPIEndpoint: "https://example.com", - MaxConnections: ptr(int64(10)), - MaxSigtermDelay: ptr(int64(20)), - }, - Instances: []v1alpha1.InstanceSpec{{ - ConnectionString: "hello:world:one", - }}, - }, - wantProxyArgContains: []string{ - fmt.Sprintf("hello:world:one?port=%d", workload.DefaultFirstPort), - "--sqladmin-api-endpoint=https://example.com", - "--max-connections=10", - "--max-sigterm-delay=20", - }, - }, { desc: "port conflict with other instance causes error", proxySpec: v1alpha1.AuthProxyWorkloadSpec{ @@ -714,6 +696,19 @@ func TestProxyCLIArgs(t *testing.T) { // test that port cli args are set correctly assertContainerArgsContains(t, csqlContainer.Args, tc.wantProxyArgContains) + // Test that workload has the right env vars + for wantKey, wantValue := range tc.wantWorkloadEnv { + gotEnvVar, err := findEnvVar(wl, csqlContainer.Name, wantKey) + if err != nil { + t.Error(err) + continue + } + + if gotEnvVar.Value != wantValue { + t.Errorf("got %v, wants %v workload env var %v", gotEnvVar, wantValue, wantKey) + } + } + }) } From 420037fc7f2c27ecffb68aa1dcdb33748500bfd9 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Fri, 17 Feb 2023 15:44:49 -0700 Subject: [PATCH 21/29] chore: add make target to build a local proxy image for e2e tests. 
(#208) This make target will allow the developer to build a proxy image from a local cloud-sql-proxy working directory for their local e2e tests. This is useful if you are writing code in the operator that depends on a not-yet-released feature in the proxy. --- Makefile | 17 +++++++++++++++++ build.sample.env | 6 ++++++ docs/dev.md | 20 ++++++++++++++++++++ tests/setup_test.go | 2 +- 4 files changed, 44 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c36de67f..a7ac5d6e 100644 --- a/Makefile +++ b/Makefile @@ -375,6 +375,23 @@ e2e_image_push: generate # Build and push a operator image to the e2e artifact r DOCKER_FILE_NAME=Dockerfile-operator \ $(PWD)/tools/docker-build.sh +### +# Build a version of the cloud-sql-proxy from local sources +E2E_LOCAL_PROXY_PROJECT_DIR?=/not-set +E2E_LOCAL_PROXY_BUILD_URL_FILE=$(PWD)/bin/last-local-proxy-url.txt +E2E_LOCAL_PROXY_BUILD_URL=$(shell cat $(E2E_LOCAL_PROXY_BUILD_URL_FILE) | tr -d "\n") + +.PHONY: e2e_image_push +e2e_local_proxy_image_push: # Build and push the proxy image from a local working directory to the e2e artifact repo + test -d $(E2E_LOCAL_PROXY_PROJECT_DIR) && \ + PROJECT_DIR=$(E2E_LOCAL_PROXY_PROJECT_DIR) \ + IMAGE_NAME=cloud-sql-proxy-dev \ + REPO_URL=$(E2E_DOCKER_URL) \ + IMAGE_URL_OUT=$(E2E_LOCAL_PROXY_BUILD_URL_FILE) \ + PLATFORMS=linux/amd64 \ + DOCKER_FILE_NAME=Dockerfile \ + $(PWD)/tools/docker-build.sh + ## # Build tool dependencies diff --git a/build.sample.env b/build.sample.env index d968cb0e..e339fc21 100644 --- a/build.sample.env +++ b/build.sample.env @@ -6,3 +6,9 @@ ## # The gcloud project id to use for the end-to-end tests E2E_PROJECT_ID = my-gcloud-project + +## +# To run the e2e tests using the proxy code from your local machine, +# Uncomment this line and set the path to your local working directory +# of the cloud-sql-proxy project. 
+# E2E_LOCAL_PROXY_PROJECT_DIR = /home/me/projects/cloud-sql-proxy diff --git a/docs/dev.md b/docs/dev.md index 7a5b362f..475b4436 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -31,3 +31,23 @@ Then, to create the CRD for Workload ``` +## Running E2E tests with a custom proxy image + +You may want to write e2e tests for a proxy feature that has +not been released yet. + +Step 1: Check out the cloud-sql-proxy repo. + +Step 2: Add `E2E_LOCAL_PROXY_PROJECT_DIR = /home/me/projects/cloud-sql-proxy` +to your `build.env`. This tells your Makefile where your proxy +directory is. Set it to the path of your cloud-sql-proxy working directory. + +Step 3: Build a custom image and push it to the e2e environment +repo. Run `make e2e_local_proxy_image_push` This will build and push +a docker image from the proxy repo, and write the file `bin/last-local-proxy-url.txt` + +Step 4: Run your e2e tests. The tests will read the contents of +the file `bin/last-local-proxy-url.txt`. + +Delete the file `bin/last-local-proxy-url.txt` to go back to using +the public proxy image again \ No newline at end of file diff --git a/tests/setup_test.go b/tests/setup_test.go index 9244c957..5b86bf4f 100644 --- a/tests/setup_test.go +++ b/tests/setup_test.go @@ -110,7 +110,7 @@ func setupTests() (func(), error) { } // Read e2e test configuration - proxyImageURL = loadValue("PROXY_IMAGE_URL", "../bin/last-proxy-image-url.txt", workload.DefaultProxyImage) + proxyImageURL = loadValue("PROXY_IMAGE_URL", "../bin/last-local-proxy-url.txt", workload.DefaultProxyImage) operatorURL = loadValue("OPERATOR_IMAGE_URL", "../bin/last-gcloud-operator-url.txt", "operator:latest") testInfraPath := loadValue("TEST_INFRA_JSON", "", "../bin/testinfra.json") ti, err := loadTestInfra(testInfraPath) From 98c460bdd34dfa00815e664f60e38aa7327d92d4 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Fri, 17 Feb 2023 15:55:30 -0700 Subject: [PATCH 22/29] feat: Validate 
AuthProxyWorkload spec.selector field (#209) Ensure that the workload selector field is valid. Either name or selector must be set, but not both. Related to #36 --- .../api/v1alpha1/authproxyworkload_test.go | 185 ++++++++++++++++++ .../api/v1alpha1/authproxyworkload_webhook.go | 77 ++++++-- 2 files changed, 245 insertions(+), 17 deletions(-) create mode 100644 internal/api/v1alpha1/authproxyworkload_test.go diff --git a/internal/api/v1alpha1/authproxyworkload_test.go b/internal/api/v1alpha1/authproxyworkload_test.go new file mode 100644 index 00000000..91faef88 --- /dev/null +++ b/internal/api/v1alpha1/authproxyworkload_test.go @@ -0,0 +1,185 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha1_test + +import ( + "testing" + + cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestAuthProxyWorkload_ValidateCreate(t *testing.T) { + data := []struct { + desc string + spec cloudsqlapi.AuthProxyWorkloadSpec + wantValid bool + }{ + { + desc: "Valid WorkloadSelectorSpec with Name", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + }, + wantValid: true, + }, + { + desc: "Valid WorkloadSelectorSpec with Selector", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + }, + wantValid: true, + }, + { + desc: "Invalid, both workload selector and name both set", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + }, + }, + }, + wantValid: false, + }, + { + desc: "Invalid, WorkloadSelector missing name and selector", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{Kind: "Deployment"}, + }, + wantValid: false, + }, + { + desc: "Valid, Instance configured with PortEnvName", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + wantValid: true, + }, + } + + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: tc.spec, + } + err := p.ValidateCreate() + gotValid := err == nil + switch { 
+ case tc.wantValid && !gotValid: + t.Errorf("wants create valid, got error %v", err) + printFieldErrors(t, err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on create, got no error") + default: + t.Logf("create passed %s", tc.desc) + // test passes, do nothing. + } + }) + } +} + +func TestAuthProxyWorkload_ValidateUpdate(t *testing.T) { + data := []struct { + desc string + spec cloudsqlapi.AuthProxyWorkloadSpec + oldSpec cloudsqlapi.AuthProxyWorkloadSpec + wantValid bool + }{ + { + desc: "Valid, update adds another instance", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{ + { + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }, + { + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT2", + }, + }, + }, + wantValid: true, + }, + } + + for _, tc := range data { + t.Run(tc.desc, func(t *testing.T) { + p := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: tc.spec, + } + oldP := cloudsqlapi.AuthProxyWorkload{ + ObjectMeta: v1.ObjectMeta{Name: "sample"}, + Spec: tc.oldSpec} + + err := p.ValidateUpdate(&oldP) + gotValid := err == nil + + switch { + case tc.wantValid && !gotValid: + t.Errorf("wants create valid, got error %v", err) + case !tc.wantValid && gotValid: + t.Errorf("wants an error on create, got no error") + default: + t.Logf("update passed %s", tc.desc) + // test passes, do nothing. 
+ } + }) + } +} + +func printFieldErrors(t *testing.T, err error) { + t.Helper() + statusErr, ok := err.(*apierrors.StatusError) + if ok { + t.Errorf("Field status errors: ") + for _, v := range statusErr.Status().Details.Causes { + t.Errorf(" %v %v: %v ", v.Field, v.Type, v.Message) + } + } +} diff --git a/internal/api/v1alpha1/authproxyworkload_webhook.go b/internal/api/v1alpha1/authproxyworkload_webhook.go index d0880903..c05cafdf 100644 --- a/internal/api/v1alpha1/authproxyworkload_webhook.go +++ b/internal/api/v1alpha1/authproxyworkload_webhook.go @@ -15,7 +15,13 @@ package v1alpha1 import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -30,10 +36,7 @@ func (r *AuthProxyWorkload) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
- -//+kubebuilder:webhook:path=/mutate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload,mutating=true,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1alpha1,name=mauthproxyworkload.kb.io,admissionReviewVersions=v1 - +// +kubebuilder:webhook:path=/mutate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload,mutating=true,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1alpha1,name=mauthproxyworkload.kb.io,admissionReviewVersions=v1 var _ webhook.Defaulter = &AuthProxyWorkload{} // Default implements webhook.Defaulter so a webhook will be registered for the type @@ -45,31 +48,71 @@ func (r *AuthProxyWorkload) Default() { } } -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -//+kubebuilder:webhook:path=/validate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload,mutating=false,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1alpha1,name=vauthproxyworkload.kb.io,admissionReviewVersions=v1 - +// +kubebuilder:webhook:path=/validate-cloudsql-cloud-google-com-v1alpha1-authproxyworkload,mutating=false,failurePolicy=fail,sideEffects=None,groups=cloudsql.cloud.google.com,resources=authproxyworkloads,verbs=create;update,versions=v1alpha1,name=vauthproxyworkload.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &AuthProxyWorkload{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type func (r *AuthProxyWorkload) ValidateCreate() error { - authproxyworkloadlog.Info("validate create", "name", r.Name) - - // TODO(user): fill in your validation logic upon object creation. 
- return nil + return r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type func (r *AuthProxyWorkload) ValidateUpdate(_ runtime.Object) error { - authproxyworkloadlog.Info("validate update", "name", r.Name) - - // TODO(user): fill in your validation logic upon object update. - return nil + return r.validate() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type func (r *AuthProxyWorkload) ValidateDelete() error { - authproxyworkloadlog.Info("validate delete", "name", r.Name) + return nil +} - // TODO(user): fill in your validation logic upon object deletion. +func (r *AuthProxyWorkload) validate() error { + var allErrs field.ErrorList + + allErrs = append(allErrs, validation.ValidateLabelName(r.Name, field.NewPath("metadata", "name"))...) + allErrs = append(allErrs, validateWorkload(&r.Spec.Workload, field.NewPath("spec", "workload"))...) + + if len(allErrs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{ + Group: GroupVersion.Group, + Kind: "AuthProxyWorkload"}, + r.Name, allErrs) + } return nil } + +var supportedKinds = []string{"CronJob", "Job", "StatefulSet", "Deployment", "DaemonSet", "ReplicaSet", "Pod"} + +func validateWorkload(spec *WorkloadSelectorSpec, f *field.Path) field.ErrorList { + var errs field.ErrorList + if spec.Selector != nil { + verr := validation.ValidateLabelSelector(spec.Selector, validation.LabelSelectorValidationOptions{}, f.Child("selector")) + errs = append(errs, verr...) + } + + if spec.Name != "" && spec.Selector != nil { + errs = append(errs, field.Invalid(f.Child("name"), spec, + "WorkloadSelectorSpec must specify either name or selector. Both were set.")) + } + if spec.Name == "" && spec.Selector == nil { + errs = append(errs, field.Invalid(f.Child("name"), spec, + "WorkloadSelectorSpec must specify either name or selector. 
Neither was set.")) + } + + _, gk := schema.ParseKindArg(spec.Kind) + var found bool + for _, kind := range supportedKinds { + if kind == gk.Kind { + found = true + break + } + } + if !found { + errs = append(errs, field.Invalid(f.Child("kind"), spec.Kind, + fmt.Sprintf("Kind was %q, must be one of CronJob, Job, StatefulSet, Deployment, DaemonSet or Pod", gk.Kind))) + + } + + return errs +} From 3ede42da9f502090d80b95970296f138484ef522 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 21 Feb 2023 10:31:56 -0700 Subject: [PATCH 23/29] feat: Add telemetry settings to configure health check port (#210) Allow the user to configure the health check port for a proxy container. This adds TelemetrySpec which will hold the health check port and other telemetry-related configuration. Related to #45 --- ...l.cloud.google.com_authproxyworkloads.yaml | 8 ++++ installer/cloud-sql-proxy-operator.yaml | 8 ++++ .../api/v1alpha1/authproxyworkload_types.go | 13 +++++++ .../api/v1alpha1/zz_generated.deepcopy.go | 25 ++++++++++++ internal/workload/podspec_updates.go | 38 +++++++++++++++++-- internal/workload/podspec_updates_test.go | 26 +++++++++++++ 6 files changed, 115 insertions(+), 3 deletions(-) diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index 9f01f75c..e68e2eea 100644 --- a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -902,6 +902,14 @@ spec: sqlAdminAPIEndpoint: description: SQLAdminAPIEndpoint is a debugging parameter that when specified will change the Google Cloud api endpoint used by the proxy. type: string + telemetry: + description: Telemetry specifies how the proxy should expose telemetry. Optional, by default + properties: + httpPort: + description: HTTPPort the port for Prometheus and health check server. 
This sets the proxy container's CLI argument `--http-port` + format: int32 + type: integer + type: object type: object instances: description: Instances lists the Cloud SQL instances to connect diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index fadf2fb4..81e45f1e 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -920,6 +920,14 @@ spec: sqlAdminAPIEndpoint: description: SQLAdminAPIEndpoint is a debugging parameter that when specified will change the Google Cloud api endpoint used by the proxy. type: string + telemetry: + description: Telemetry specifies how the proxy should expose telemetry. Optional, by default + properties: + httpPort: + description: HTTPPort the port for Prometheus and health check server. This sets the proxy container's CLI argument `--http-port` + format: int32 + type: integer + type: object type: object instances: description: Instances lists the Cloud SQL instances to connect diff --git a/internal/api/v1alpha1/authproxyworkload_types.go b/internal/api/v1alpha1/authproxyworkload_types.go index 659ca743..e3d88c60 100644 --- a/internal/api/v1alpha1/authproxyworkload_types.go +++ b/internal/api/v1alpha1/authproxyworkload_types.go @@ -136,6 +136,11 @@ type AuthProxyContainerSpec struct { //+kubebuilder:validation:Optional Resources *v1.ResourceRequirements `json:"resources,omitempty"` + // Telemetry specifies how the proxy should expose telemetry. + // Optional, by default + //+kubebuilder:validation:Optional + Telemetry *TelemetrySpec `json:"telemetry,omitempty"` + // MaxConnections limits the number of connections. Default value is no limit. // This sets the proxy container's CLI argument `--max-connections` //+kubebuilder:validation:Optional @@ -171,6 +176,14 @@ type AuthProxyContainerSpec struct { RolloutStrategy string `json:"rolloutStrategy,omitempty"` } +// TelemetrySpec specifies how the proxy container will expose telemetry. 
+type TelemetrySpec struct { + // HTTPPort the port for Prometheus and health check server. + // This sets the proxy container's CLI argument `--http-port` + //+kubebuilder:validation:Optional + HTTPPort *int32 `json:"httpPort,omitempty"` +} + // InstanceSpec describes the configuration for how the proxy should expose // a Cloud SQL database instance to a workload. The simplest possible configuration // declares just the connection string and the port number or unix socket. diff --git a/internal/api/v1alpha1/zz_generated.deepcopy.go b/internal/api/v1alpha1/zz_generated.deepcopy.go index e0340fe6..97ec9810 100644 --- a/internal/api/v1alpha1/zz_generated.deepcopy.go +++ b/internal/api/v1alpha1/zz_generated.deepcopy.go @@ -38,6 +38,11 @@ func (in *AuthProxyContainerSpec) DeepCopyInto(out *AuthProxyContainerSpec) { *out = new(corev1.ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.Telemetry != nil { + in, out := &in.Telemetry, &out.Telemetry + *out = new(TelemetrySpec) + (*in).DeepCopyInto(*out) + } if in.MaxConnections != nil { in, out := &in.MaxConnections, &out.MaxConnections *out = new(int64) @@ -214,6 +219,26 @@ func (in *InstanceSpec) DeepCopy() *InstanceSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TelemetrySpec) DeepCopyInto(out *TelemetrySpec) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetrySpec. +func (in *TelemetrySpec) DeepCopy() *TelemetrySpec { + if in == nil { + return nil + } + out := new(TelemetrySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WorkloadSelectorSpec) DeepCopyInto(out *WorkloadSelectorSpec) { *out = *in diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index 30fc942f..e418e107 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -252,10 +252,15 @@ func (s *updateState) addWorkloadPort(p int32) { s.addPort(p, proxyInstanceID{}) } -func (s *updateState) addProxyPort(p int32) { +func (s *updateState) addProxyPort(port int32, p *cloudsqlapi.AuthProxyWorkload) { // This port is associated with the workload, not the proxy. // so this uses an empty proxyInstanceID{} - s.addPort(p, proxyInstanceID{}) + s.addPort(port, proxyInstanceID{ + AuthProxyWorkload: types.NamespacedName{ + Namespace: p.Namespace, + Name: p.Name, + }, + }) } // isPortInUse checks if the port is in use. @@ -627,7 +632,18 @@ func (s *updateState) updateContainerEnv(c *corev1.Container) { // addHealthCheck adds the health check declaration to this workload. func (s *updateState) addHealthCheck(p *cloudsqlapi.AuthProxyWorkload, c *corev1.Container) { - port := DefaultHealthCheckPort + var portPtr *int32 + + cs := p.Spec.AuthProxyContainer + + // if the TelemetrySpec.exists, get Port values + if cs != nil && cs.Telemetry != nil { + if cs.Telemetry.HTTPPort != nil { + portPtr = cs.Telemetry.HTTPPort + } + } + + port := s.usePort(portPtr, DefaultHealthCheckPort, p) c.StartupProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{ @@ -665,3 +681,19 @@ func (s *updateState) addError(errorCode, description string, p *cloudsqlapi.Aut func (s *updateState) defaultProxyImage() string { return DefaultProxyImage } + +func (s *updateState) usePort(configValue *int32, defaultValue int32, p *cloudsqlapi.AuthProxyWorkload) int32 { + if configValue != nil { + s.addProxyPort(*configValue, p) + return *configValue + } + + port := defaultValue + if configValue == nil { + for s.isPortInUse(port) { + port++ + } + } + 
s.addProxyPort(port, p) + return port +} diff --git a/internal/workload/podspec_updates_test.go b/internal/workload/podspec_updates_test.go index a4b7dd06..b5922947 100644 --- a/internal/workload/podspec_updates_test.go +++ b/internal/workload/podspec_updates_test.go @@ -622,6 +622,32 @@ func TestProxyCLIArgs(t *testing.T) { fmt.Sprintf("hello:world:one?port=%d&private-ip=true", workload.DefaultFirstPort), fmt.Sprintf("hello:world:two?port=%d&private-ip=false", workload.DefaultFirstPort+1)}, }, + { + desc: "global flags", + proxySpec: v1alpha1.AuthProxyWorkloadSpec{ + AuthProxyContainer: &v1alpha1.AuthProxyContainerSpec{ + SQLAdminAPIEndpoint: "https://example.com", + Telemetry: &v1alpha1.TelemetrySpec{ + HTTPPort: ptr(int32(9092)), + }, + MaxConnections: ptr(int64(10)), + MaxSigtermDelay: ptr(int64(20)), + }, + Instances: []v1alpha1.InstanceSpec{{ + ConnectionString: "hello:world:one", + }}, + }, + wantProxyArgContains: []string{ + fmt.Sprintf("hello:world:one?port=%d", workload.DefaultFirstPort), + }, + wantWorkloadEnv: map[string]string{ + "CSQL_PROXY_SQLADMIN_API_ENDPOINT": "https://example.com", + "CSQL_PROXY_HTTP_PORT": "9092", + "CSQL_PROXY_HEALTH_CHECK": "true", + "CSQL_PROXY_MAX_CONNECTIONS": "10", + "CSQL_PROXY_MAX_SIGTERM_DELAY": "20", + }, + }, { desc: "port conflict with other instance causes error", proxySpec: v1alpha1.AuthProxyWorkloadSpec{ From 4304283c1e85b079aab5cbf6c4c2dafb73ed654a Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 21 Feb 2023 11:25:04 -0700 Subject: [PATCH 24/29] feat: Validate AuthProxyWorkload updates to prevent changes to the workload selector. (#211) We do not allow a user to change the selector of an AuthProxyWorkload, as it will require the operator to do too much bookkeeping. We can keep our code clean and simple if the operator assumes that the workload selected for a proxy configuration never changes. 
Related to #36 --- .../api/v1alpha1/authproxyworkload_test.go | 81 ++++++++++++++++++- .../api/v1alpha1/authproxyworkload_webhook.go | 76 ++++++++++++++--- 2 files changed, 144 insertions(+), 13 deletions(-) diff --git a/internal/api/v1alpha1/authproxyworkload_test.go b/internal/api/v1alpha1/authproxyworkload_test.go index 91faef88..d6e2a645 100644 --- a/internal/api/v1alpha1/authproxyworkload_test.go +++ b/internal/api/v1alpha1/authproxyworkload_test.go @@ -145,6 +145,82 @@ func TestAuthProxyWorkload_ValidateUpdate(t *testing.T) { }, wantValid: true, }, + { + desc: "Invalid, WorkloadSelectorSpec.Kind changed", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "StatefulSet", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + wantValid: false, + }, + { + desc: "Invalid, WorkloadSelectorSpec.Name changed", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "things", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Name: "webapp", + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + wantValid: false, + }, + { + desc: "Invalid, WorkloadSelectorSpec.Selector changed", + spec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "sample"}, + 
}, + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db2", + PortEnvName: "DB_PORT", + }}, + }, + oldSpec: cloudsqlapi.AuthProxyWorkloadSpec{ + Workload: cloudsqlapi.WorkloadSelectorSpec{ + Kind: "Deployment", + Selector: &v1.LabelSelector{ + MatchLabels: map[string]string{"app": "other"}, + }, + }, + Instances: []cloudsqlapi.InstanceSpec{{ + ConnectionString: "proj:region:db1", + PortEnvName: "DB_PORT", + }}, + }, + wantValid: false, + }, } for _, tc := range data { @@ -155,7 +231,8 @@ func TestAuthProxyWorkload_ValidateUpdate(t *testing.T) { } oldP := cloudsqlapi.AuthProxyWorkload{ ObjectMeta: v1.ObjectMeta{Name: "sample"}, - Spec: tc.oldSpec} + Spec: tc.oldSpec, + } err := p.ValidateUpdate(&oldP) gotValid := err == nil @@ -164,7 +241,7 @@ func TestAuthProxyWorkload_ValidateUpdate(t *testing.T) { case tc.wantValid && !gotValid: t.Errorf("wants create valid, got error %v", err) case !tc.wantValid && gotValid: - t.Errorf("wants an error on create, got no error") + t.Errorf("wants an error on update, got no error") default: t.Logf("update passed %s", tc.desc) // test passes, do nothing. 
diff --git a/internal/api/v1alpha1/authproxyworkload_webhook.go b/internal/api/v1alpha1/authproxyworkload_webhook.go index c05cafdf..5f96d31d 100644 --- a/internal/api/v1alpha1/authproxyworkload_webhook.go +++ b/internal/api/v1alpha1/authproxyworkload_webhook.go @@ -16,8 +16,10 @@ package v1alpha1 import ( "fmt" + "reflect" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -53,12 +55,36 @@ var _ webhook.Validator = &AuthProxyWorkload{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type func (r *AuthProxyWorkload) ValidateCreate() error { - return r.validate() + allErrs := r.validate() + if len(allErrs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{ + Group: GroupVersion.Group, + Kind: "AuthProxyWorkload"}, + r.Name, allErrs) + } + return nil + } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *AuthProxyWorkload) ValidateUpdate(_ runtime.Object) error { - return r.validate() +func (r *AuthProxyWorkload) ValidateUpdate(old runtime.Object) error { + o, ok := old.(*AuthProxyWorkload) + if !ok { + return fmt.Errorf("bad request, expected old to be an AuthProxyWorkload") + } + + allErrs := r.validate() + allErrs = append(allErrs, r.validateUpdateFrom(o)...) 
+ if len(allErrs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{ + Group: GroupVersion.Group, + Kind: "AuthProxyWorkload"}, + r.Name, allErrs) + } + return nil + } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type @@ -66,20 +92,48 @@ func (r *AuthProxyWorkload) ValidateDelete() error { return nil } -func (r *AuthProxyWorkload) validate() error { +func (r *AuthProxyWorkload) validate() field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, validation.ValidateLabelName(r.Name, field.NewPath("metadata", "name"))...) allErrs = append(allErrs, validateWorkload(&r.Spec.Workload, field.NewPath("spec", "workload"))...) - if len(allErrs) > 0 { - return apierrors.NewInvalid( - schema.GroupKind{ - Group: GroupVersion.Group, - Kind: "AuthProxyWorkload"}, - r.Name, allErrs) + return allErrs + +} + +func (r *AuthProxyWorkload) validateUpdateFrom(op *AuthProxyWorkload) field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.Workload.Kind != op.Spec.Workload.Kind { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec", "workload", "kind"), r.Spec.Workload.Kind, + "kind cannot be changed on update")) } - return nil + if r.Spec.Workload.Name != op.Spec.Workload.Name { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec", "workload", "name"), r.Spec.Workload.Name, + "kind cannot be changed on update")) + } + if selectorNotEqual(r.Spec.Workload.Selector, op.Spec.Workload.Selector) { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec", "workload", "selector"), r.Spec.Workload.Selector, + "selector cannot be changed on update")) + } + + return allErrs +} + +func selectorNotEqual(s *metav1.LabelSelector, os *metav1.LabelSelector) bool { + if s == nil && os == nil { + return false + } + + if s != nil && os != nil { + return !reflect.DeepEqual(s, os) + } + + return true } var supportedKinds = []string{"CronJob", "Job", "StatefulSet", "Deployment", "DaemonSet", "ReplicaSet", 
"Pod"} From 2c16597cf21a75b8a472b398f57313cbf1dc45e6 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 21 Feb 2023 13:21:02 -0700 Subject: [PATCH 25/29] chore: update to use latest proxy image: 2.1.0 (#214) --- internal/workload/podspec_updates.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index e418e107..29240f02 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -35,7 +35,7 @@ import ( // package and documented here so that they appear in the godoc. These also // need to be documented in the CRD const ( - DefaultProxyImage = "gcr.io/cloud-sql-connectors/cloud-sql-proxy:2.0.0" + DefaultProxyImage = "gcr.io/cloud-sql-connectors/cloud-sql-proxy:2.1.0" // DefaultFirstPort is the first port number chose for an instance listener by the // proxy. From 8177a35be7988a01de682d806c05b9306537c3a1 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 21 Feb 2023 14:15:56 -0700 Subject: [PATCH 26/29] feat: Add support for Unix sockets. (#205) The operator will configure the pod with a proxy that listens on a unix socket. The operator will mount the parent directory of each instance's unixSocketPath to each container in the pod. When the proxy starts, it will create a unix socket for the instance in that mounted directory. Care should be take to ensure that the parent directory can safely be used across all containers in the pod. 
Fixes #47 --- ...l.cloud.google.com_authproxyworkloads.yaml | 6 + docs/authproxyworkload-reference.md | 33 +++++ installer/cloud-sql-proxy-operator.yaml | 6 + .../api/v1alpha1/authproxyworkload_types.go | 10 ++ internal/testhelpers/resources.go | 88 ++++++++++-- internal/workload/names.go | 9 +- internal/workload/names_test.go | 2 +- internal/workload/podspec_updates.go | 129 ++++++++++++++++-- internal/workload/podspec_updates_test.go | 89 ++++++++++++ tests/e2e_test.go | 43 +++++- 10 files changed, 374 insertions(+), 41 deletions(-) create mode 100644 docs/authproxyworkload-reference.md diff --git a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml index e68e2eea..25ee007d 100644 --- a/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml +++ b/config/crd/bases/cloudsql.cloud.google.com_authproxyworkloads.yaml @@ -935,6 +935,12 @@ spec: privateIP: description: PrivateIP Enable connection to the Cloud SQL instance's private ip for this instance. Optional, default false. type: boolean + unixSocketPath: + description: UnixSocketPath is the path to the unix socket where the proxy will listen for connnections. This will be mounted to all containers in the pod. + type: string + unixSocketPathEnvName: + description: UnixSocketPathEnvName is the environment variable containing the value of UnixSocketPath. + type: string type: object minItems: 1 type: array diff --git a/docs/authproxyworkload-reference.md b/docs/authproxyworkload-reference.md new file mode 100644 index 00000000..40b6a8c7 --- /dev/null +++ b/docs/authproxyworkload-reference.md @@ -0,0 +1,33 @@ +# AuthProxyWorkload Reference Documentation + +Containing important details about how the AuthProxyWorkload resource can +be configured. + +## Port and PortEnvName + +If HostEnvName is set, the operator will set an EnvVar with the value 127.0.0.1. +If HostEnvName is empty, then no EnvVar is set. 
+ +If PortEnvName is set, the operator will set an EnvVar with the port number used +for that +instance. If PortEnvName is empty, then no EnvVar is set. + +The port used for an instance is computed by `updateState.useInstancePort()` +which ensures that either Port is used if set, or else a non-conflicting port +number is chosen by the operator. + +At least one of Port and PortEnvName must be set for the configuration to be +valid. (We need to add this validation to the operator. It will be handled in +authproxyworkload_webhook.go) + +This is how Port and PortEnvName should interact: + +| PortEnvName | Port | proxy port args | container env | +|------------------|-------------|-----------------|----------------| +| set to "DB_PORT" | not set | ?port={next} | DB_PORT={next} | +| set to "DB_PORT" | set to 5401 | ?port=5401 | DB_PORT=5401. | +| not set | set to 5401 | ?port=5401 | not set | +| not set | not set | invalid. | invalid | + + + \ No newline at end of file diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 81e45f1e..6252c02f 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -953,6 +953,12 @@ spec: privateIP: description: PrivateIP Enable connection to the Cloud SQL instance's private ip for this instance. Optional, default false. type: boolean + unixSocketPath: + description: UnixSocketPath is the path to the unix socket where the proxy will listen for connnections. This will be mounted to all containers in the pod. + type: string + unixSocketPathEnvName: + description: UnixSocketPathEnvName is the environment variable containing the value of UnixSocketPath. 
+ type: string type: object minItems: 1 type: array diff --git a/internal/api/v1alpha1/authproxyworkload_types.go b/internal/api/v1alpha1/authproxyworkload_types.go index e3d88c60..edcb89cd 100644 --- a/internal/api/v1alpha1/authproxyworkload_types.go +++ b/internal/api/v1alpha1/authproxyworkload_types.go @@ -252,6 +252,16 @@ type InstanceSpec struct { // Optional, when set this environment variable will be added to all containers in the workload. //+kubebuilder:validation:Optional HostEnvName string `json:"hostEnvName,omitempty"` + + // UnixSocketPath is the path to the unix socket where the proxy will listen + // for connnections. This will be mounted to all containers in the pod. + //+kubebuilder:validation:Optional + UnixSocketPath string `json:"unixSocketPath,omitempty"` + + // UnixSocketPathEnvName is the environment variable containing the value of + // UnixSocketPath. + //+kubebuilder:validation:Optional + UnixSocketPathEnvName string `json:"unixSocketPathEnvName,omitempty"` } // AuthProxyWorkloadStatus presents the observed state of AuthProxyWorkload using diff --git a/internal/testhelpers/resources.go b/internal/testhelpers/resources.go index d976f257..2bf66635 100644 --- a/internal/testhelpers/resources.go +++ b/internal/testhelpers/resources.go @@ -89,6 +89,20 @@ func BuildPgPodSpec(mainPodSleep int, appLabel, secretName string) corev1.PodTem return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) } +// BuildPgUnixPodSpec creates a podspec specific to Postgres databases that will +// connect via a unix socket and run a trivial. It also configures the +// pod's Liveness probe so that the pod's `Ready` condition is `Ready` when the +// database can connect. 
+func BuildPgUnixPodSpec(mainPodSleep int, appLabel, secretName string) corev1.PodTemplateSpec { + const ( + livenessCmd = "psql --host=$DB_PATH --username=$DB_USER '--command=select 1' --echo-queries --dbname=$DB_NAME" + imageName = "postgres" + passEnvVarName = "PGPASSWORD" + ) + + return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) +} + // BuildMySQLPodSpec creates a podspec specific to MySQL databases that will connect // and run a trivial query. It also configures the pod's Liveness probe so that // the pod's `Ready` condition is `Ready` when the database can connect. @@ -102,6 +116,20 @@ func BuildMySQLPodSpec(mainPodSleep int, appLabel, secretName string) corev1.Pod return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) } +// BuildMySQLUnixPodSpec creates a podspec specific to MySQL databases that will +// connect via a unix socket and run a trivial query. It also configures the +// pod's Liveness probe so that the pod's `Ready` condition is `Ready` when the +// database can connect. +func BuildMySQLUnixPodSpec(mainPodSleep int, appLabel, secretName string) corev1.PodTemplateSpec { + const ( + livenessCmd = "mysql -S $DB_PATH --user=$DB_USER --password=$DB_PASS --database=$DB_NAME '--execute=select now()' " + imageName = "mysql" + passEnvVarName = "DB_PASS" + ) + + return buildConnectPodSpec(mainPodSleep, appLabel, secretName, livenessCmd, passEnvVarName, imageName) +} + // BuildMSSQLPodSpec creates a podspec specific to MySQL databases that will connect // and run a trivial query. It also configures the pod's Liveness probe so that // the pod's `Ready` condition is `Ready` when the database can connect. @@ -572,8 +600,36 @@ func (cc *TestCaseClient) CreateDeploymentReplicaSetAndPods(ctx context.Context, } // BuildAuthProxyWorkload creates an AuthProxyWorkload object with a -// single connection instance. +// single instance with a tcp connection. 
func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) *v1alpha1.AuthProxyWorkload { + p := NewAuthProxyWorkload(key) + AddTCPInstance(p, connectionString) + return p +} + +// AddTCPInstance adds a database instance with a tcp connection, setting +// HostEnvName to "DB_HOST" and PortEnvName to "DB_PORT". +func AddTCPInstance(p *v1alpha1.AuthProxyWorkload, connectionString string) { + p.Spec.Instances = append(p.Spec.Instances, v1alpha1.InstanceSpec{ + ConnectionString: connectionString, + HostEnvName: "DB_HOST", + PortEnvName: "DB_PORT", + }) +} + +// AddUnixInstance adds a database instance with a unix socket connection, +// setting UnixSocketPathEnvName to "DB_PATH". +func AddUnixInstance(p *v1alpha1.AuthProxyWorkload, connectionString string, path string) { + p.Spec.Instances = append(p.Spec.Instances, v1alpha1.InstanceSpec{ + ConnectionString: connectionString, + UnixSocketPath: path, + UnixSocketPathEnvName: "DB_PATH", + }) +} + +// NewAuthProxyWorkload creates a new AuthProxyWorkload with the +// TypeMeta, name and namespace set. +func NewAuthProxyWorkload(key types.NamespacedName) *v1alpha1.AuthProxyWorkload { return &v1alpha1.AuthProxyWorkload{ TypeMeta: metav1.TypeMeta{ APIVersion: v1alpha1.GroupVersion.String(), @@ -583,26 +639,31 @@ func BuildAuthProxyWorkload(key types.NamespacedName, connectionString string) * Name: key.Name, Namespace: key.Namespace, }, - Spec: v1alpha1.AuthProxyWorkloadSpec{ - - Instances: []v1alpha1.InstanceSpec{{ - ConnectionString: connectionString, - HostEnvName: "DB_HOST", - PortEnvName: "DB_PORT", - }}, - }, } } // CreateAuthProxyWorkload creates an AuthProxyWorkload in the kubernetes cluster. 
func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types.NamespacedName, appLabel string, connectionString string, kind string) (*v1alpha1.AuthProxyWorkload, error) { - proxy := BuildAuthProxyWorkload(key, connectionString) + p := NewAuthProxyWorkload(key) + AddTCPInstance(p, connectionString) + cc.ConfigureSelector(p, appLabel, kind) + cc.ConfigureResources(p) + return p, cc.Create(ctx, p) +} + +// ConfigureSelector Configures the workload selector on AuthProxyWorkload to use the label selector +// "app=${appLabel}" +func (cc *TestCaseClient) ConfigureSelector(proxy *v1alpha1.AuthProxyWorkload, appLabel string, kind string) { proxy.Spec.Workload = v1alpha1.WorkloadSelectorSpec{ Kind: kind, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": appLabel}, }, } +} + +// ConfigureResources Configures resource requests +func (cc *TestCaseClient) ConfigureResources(proxy *v1alpha1.AuthProxyWorkload) { proxy.Spec.AuthProxyContainer = &v1alpha1.AuthProxyContainerSpec{ Image: cc.ProxyImageURL, Resources: &corev1.ResourceRequirements{ @@ -611,11 +672,14 @@ func (cc *TestCaseClient) CreateAuthProxyWorkload(ctx context.Context, key types }, }, } +} + +func (cc *TestCaseClient) Create(ctx context.Context, proxy *v1alpha1.AuthProxyWorkload) error { err := cc.Client.Create(ctx, proxy) if err != nil { - return nil, fmt.Errorf("Unable to create entity %v", err) + return fmt.Errorf("Unable to create entity %v", err) } - return proxy, nil + return nil } // GetConditionStatus finds a condition where Condition.Type == condType and returns diff --git a/internal/workload/names.go b/internal/workload/names.go index 39977975..b3087a09 100644 --- a/internal/workload/names.go +++ b/internal/workload/names.go @@ -20,19 +20,12 @@ import ( "strings" cloudsqlapi "github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/internal/api/v1alpha1" - "k8s.io/apimachinery/pkg/types" ) // ContainerPrefix is the name prefix used on containers added to PodSpecs // 
by this operator. const ContainerPrefix = "csql-" -// ContainerNameFromNamespacedName generates a valid name for a container, following -// identical logic to ContainerName -func ContainerNameFromNamespacedName(r types.NamespacedName) string { - return SafePrefixedName(ContainerPrefix, r.Namespace+"-"+r.Name) -} - // ContainerName generates a valid name for a corev1.Container object that // implements this cloudsql instance. Names must be 63 characters or fewer and // adhere to the rfc1035/rfc1123 label (DNS_LABEL) format. r.ObjectMeta.Name @@ -47,7 +40,7 @@ func ContainerName(r *cloudsqlapi.AuthProxyWorkload) string { // name and the Cloud SQL instance name. func VolumeName(r *cloudsqlapi.AuthProxyWorkload, inst *cloudsqlapi.InstanceSpec, mountType string) string { connName := strings.ReplaceAll(strings.ToLower(inst.ConnectionString), ":", "-") - return SafePrefixedName(ContainerPrefix, r.GetNamespace()+"-"+r.GetName()+"-"+mountType+"-"+connName) + return SafePrefixedName(ContainerPrefix, r.GetName()+"-"+mountType+"-"+connName) } // SafePrefixedName adds a prefix to a name and shortens it while preserving its uniqueness diff --git a/internal/workload/names_test.go b/internal/workload/names_test.go index fe57a229..8adf2efc 100644 --- a/internal/workload/names_test.go +++ b/internal/workload/names_test.go @@ -84,7 +84,7 @@ func TestContainerName(t *testing.T) { func TestVolumeName(t *testing.T) { csql := authProxyWorkload("hello-world", []v1alpha1.InstanceSpec{{ConnectionString: "proj:inst:db"}}) got := workload.VolumeName(csql, &csql.Spec.Instances[0], "temp") - want := "csql-default-hello-world-temp-proj-inst-db" + want := "csql-hello-world-temp-proj-inst-db" if want != got { t.Errorf("got %v, want %v", got, want) } diff --git a/internal/workload/podspec_updates.go b/internal/workload/podspec_updates.go index 29240f02..4c0a90a5 100644 --- a/internal/workload/podspec_updates.go +++ b/internal/workload/podspec_updates.go @@ -16,6 +16,7 @@ package workload import ( 
"fmt" + "path" "sort" "strings" "time" @@ -474,7 +475,9 @@ func (s *updateState) update(wl *PodWorkload, matches []*cloudsqlapi.AuthProxyWo for i := range podSpec.Containers { c := &podSpec.Containers[i] s.updateContainerEnv(c) + s.applyContainerVolumes(c) } + s.applyVolumes(&podSpec) // only return ConfigError if there were reported // errors during processing. @@ -521,20 +524,44 @@ func (s *updateState) updateContainer(p *cloudsqlapi.AuthProxyWorkload, wl Workl params := map[string]string{} // if it is a TCP socket + if inst.UnixSocketPath == "" { + + port := s.useInstancePort(p, inst) + params["port"] = fmt.Sprint(port) + if inst.HostEnvName != "" { + s.addWorkloadEnvVar(p, inst, corev1.EnvVar{ + Name: inst.HostEnvName, + Value: "127.0.0.1", + }) + } + if inst.PortEnvName != "" { + s.addWorkloadEnvVar(p, inst, corev1.EnvVar{ + Name: inst.PortEnvName, + Value: fmt.Sprint(port), + }) + } + } else { + // else if it is a unix socket + params["unix-socket-path"] = inst.UnixSocketPath + mountName := VolumeName(p, inst, "unix") + s.addVolumeMount(p, inst, + corev1.VolumeMount{ + Name: mountName, + ReadOnly: false, + MountPath: path.Dir(inst.UnixSocketPath), + }, + corev1.Volume{ + Name: mountName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }) + + if inst.UnixSocketPathEnvName != "" { + s.addWorkloadEnvVar(p, inst, corev1.EnvVar{ + Name: inst.UnixSocketPathEnvName, + Value: inst.UnixSocketPath, + }) + } - port := s.useInstancePort(p, inst) - params["port"] = fmt.Sprint(port) - if inst.HostEnvName != "" { - s.addWorkloadEnvVar(p, inst, corev1.EnvVar{ - Name: inst.HostEnvName, - Value: "127.0.0.1", - }) - } - if inst.PortEnvName != "" { - s.addWorkloadEnvVar(p, inst, corev1.EnvVar{ - Name: inst.PortEnvName, - Value: fmt.Sprint(port), - }) } if inst.AutoIAMAuthN != nil { @@ -674,6 +701,82 @@ func (s *updateState) addHealthCheck(p *cloudsqlapi.AuthProxyWorkload, c *corev1 return } +func (s *updateState) addVolumeMount(p 
*cloudsqlapi.AuthProxyWorkload, is *cloudsqlapi.InstanceSpec, m corev1.VolumeMount, v corev1.Volume) { + key := proxyInstanceID{ + AuthProxyWorkload: types.NamespacedName{ + Namespace: p.Namespace, + Name: p.Name, + }, + ConnectionString: is.ConnectionString, + } + vol := &managedVolume{ + Instance: key, + Volume: v, + VolumeMount: m, + } + + for i, mount := range s.mods.VolumeMounts { + if mount.Instance == key { + s.mods.VolumeMounts[i] = vol + return + } + if mount.VolumeMount.MountPath == vol.VolumeMount.MountPath { + // avoid adding volume mounts with redundant MountPaths, + // just the first one is enough. + return + } + } + s.mods.VolumeMounts = append(s.mods.VolumeMounts, vol) +} + +// applyContainerVolumes applies all the VolumeMounts to this container. +func (s *updateState) applyContainerVolumes(c *corev1.Container) { + nameAccessor := func(v corev1.VolumeMount) string { + return v.Name + } + thingAccessor := func(v *managedVolume) corev1.VolumeMount { + return v.VolumeMount + } + c.VolumeMounts = applyVolumeThings[corev1.VolumeMount](s, c.VolumeMounts, nameAccessor, thingAccessor) +} + +// applyVolumes applies all volumes to this PodSpec. +func (s *updateState) applyVolumes(ps *corev1.PodSpec) { + nameAccessor := func(v corev1.Volume) string { + return v.Name + } + thingAccessor := func(v *managedVolume) corev1.Volume { + return v.Volume + } + ps.Volumes = applyVolumeThings[corev1.Volume](s, ps.Volumes, nameAccessor, thingAccessor) +} + +// applyVolumeThings modifies a slice of Volume/VolumeMount, to include all the +// shared volumes for the proxy container's unix sockets. This will replace +// an existing volume with the same name, or append a new volume to the slice. 
+func applyVolumeThings[T corev1.VolumeMount | corev1.Volume]( + s *updateState, + newVols []T, + nameAccessor func(T) string, + thingAccessor func(*managedVolume) T) []T { + + // add or replace items for all new volume mounts + for i := 0; i < len(s.mods.VolumeMounts); i++ { + var found bool + newVol := thingAccessor(s.mods.VolumeMounts[i]) + for j := 0; j < len(newVols); j++ { + if nameAccessor(newVol) == nameAccessor(newVols[j]) { + found = true + newVols[j] = newVol + } + } + if !found { + newVols = append(newVols, newVol) + } + } + return newVols +} + func (s *updateState) addError(errorCode, description string, p *cloudsqlapi.AuthProxyWorkload) { s.err.add(errorCode, description, p) } diff --git a/internal/workload/podspec_updates_test.go b/internal/workload/podspec_updates_test.go index b5922947..85af0bfd 100644 --- a/internal/workload/podspec_updates_test.go +++ b/internal/workload/podspec_updates_test.go @@ -874,3 +874,92 @@ func TestPodAnnotation(t *testing.T) { } } } + +func TestWorkloadUnixVolume(t *testing.T) { + var ( + wantsInstanceName = "project:server:db" + wantsInstanceName2 = "project:server:db2" + wantsUnixSocketPath = "/mnt/db/server" + wantsUnixSocketPath2 = "/mnt/db/server2" + wantUnixMountDir = "/mnt/db" + wantContainerArgs = []string{ + fmt.Sprintf("%s?unix-socket-path=%s", wantsInstanceName, wantsUnixSocketPath), + fmt.Sprintf("%s?unix-socket-path=%s", wantsInstanceName2, wantsUnixSocketPath2), + } + wantWorkloadEnv = map[string]string{ + "DB_SOCKET_PATH": wantsUnixSocketPath, + } + u = workload.NewUpdater("authproxyworkload/dev") + ) + + // Create a pod + wl := podWorkload() + wl.Pod.Spec.Containers[0].Ports = + []corev1.ContainerPort{{Name: "http", ContainerPort: 8080}} + + // Create a AuthProxyWorkload that matches the deployment + csqls := []*v1alpha1.AuthProxyWorkload{ + authProxyWorkload("instance1", []v1alpha1.InstanceSpec{{ + ConnectionString: wantsInstanceName, + UnixSocketPath: wantsUnixSocketPath, + UnixSocketPathEnvName: 
"DB_SOCKET_PATH", + }, { + ConnectionString: wantsInstanceName2, + UnixSocketPath: wantsUnixSocketPath2, + UnixSocketPathEnvName: "DB_SOCKET_PATH2", + }}), + } + + // update the containers + err := configureProxies(u, wl, csqls) + if err != nil { + t.Fatal(err) + } + + // ensure that the new container exists + if len(wl.Pod.Spec.Containers) != 2 { + t.Fatalf("got %v, wants 1. deployment containers length", len(wl.Pod.Spec.Containers)) + } + + // test that the instancename matches the new expected instance name. + csqlContainer, err := findContainer(wl, fmt.Sprintf("csql-default-%s", csqls[0].GetName())) + if err != nil { + t.Fatal(err) + } + + // test that port cli args are set correctly + assertContainerArgsContains(t, csqlContainer.Args, wantContainerArgs) + + // Test that workload has the right env vars + for wantKey, wantValue := range wantWorkloadEnv { + gotEnvVar, err := findEnvVar(wl, "busybox", wantKey) + if err != nil { + t.Error(err) + logPodSpec(t, wl) + } else if gotEnvVar.Value != wantValue { + t.Errorf("got %v, wants %v workload env var %v", gotEnvVar, wantValue, wantKey) + + } + } + + // test that Volume exists + if want, got := 1, len(wl.Pod.Spec.Volumes); want != got { + t.Fatalf("got %v, wants %v. PodSpec.Volumes", got, want) + } + + // test that Volume mount exists on busybox + busyboxContainer, err := findContainer(wl, "busybox") + if err != nil { + t.Fatal(err) + } + if want, got := 1, len(busyboxContainer.VolumeMounts); want != got { + t.Fatalf("got %v, wants %v. Busybox Container.VolumeMounts", got, want) + } + if want, got := wantUnixMountDir, busyboxContainer.VolumeMounts[0].MountPath; want != got { + t.Fatalf("got %v, wants %v. Busybox Container.VolumeMounts.MountPath", got, want) + } + if want, got := wl.Pod.Spec.Volumes[0].Name, busyboxContainer.VolumeMounts[0].Name; want != got { + t.Fatalf("got %v, wants %v. 
Busybox Container.VolumeMounts.MountPath", got, want) + } + +} diff --git a/tests/e2e_test.go b/tests/e2e_test.go index 57f0d54a..32a702e4 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -276,10 +276,11 @@ func TestPublicDBConnections(t *testing.T) { ) tests := []struct { - name string - c *testhelpers.TestCaseClient - podTemplate corev1.PodTemplateSpec - allOrAny string + name string + c *testhelpers.TestCaseClient + podTemplate corev1.PodTemplateSpec + allOrAny string + isUnixSocket bool }{ { name: "postgres", @@ -287,12 +288,26 @@ func TestPublicDBConnections(t *testing.T) { podTemplate: testhelpers.BuildPgPodSpec(600, appLabel, "db-secret"), allOrAny: "all", }, + { + name: "postgres-unix", + c: newPublicPostgresClient("pgconnunix"), + podTemplate: testhelpers.BuildPgUnixPodSpec(600, appLabel, "db-secret"), + allOrAny: "all", + isUnixSocket: true, + }, { name: "mysql", c: newPublicMySQLClient("mysqlconn"), podTemplate: testhelpers.BuildMySQLPodSpec(600, appLabel, "db-secret"), allOrAny: "all", }, + { + name: "mysql-unix", + c: newPublicMySQLClient("mysqlconnunix"), + podTemplate: testhelpers.BuildMySQLUnixPodSpec(600, appLabel, "db-secret"), + allOrAny: "all", + isUnixSocket: true, + }, { name: "mssql", c: newPublicMSSQLClient("mssqlconn"), @@ -335,9 +350,20 @@ func TestPublicDBConnections(t *testing.T) { wl.Deployment.Spec.Template = test.podTemplate t.Log("Creating AuthProxyWorkload") - _, err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) - if err != nil { - t.Fatal(err) + if test.isUnixSocket { + p := testhelpers.NewAuthProxyWorkload(key) + testhelpers.AddUnixInstance(p, tp.ConnectionString, "/var/tests/dbsocket") + tp.ConfigureSelector(p, appLabel, kind) + tp.ConfigureResources(p) + err = tp.Create(ctx, p) + if err != nil { + t.Fatal(err) + } + } else { + _, err = tp.CreateAuthProxyWorkload(ctx, key, appLabel, tp.ConnectionString, kind) + if err != nil { + t.Fatal(err) + } } t.Log("Waiting for AuthProxyWorkload 
operator to begin the reconcile loop") @@ -361,6 +387,9 @@ func TestPublicDBConnections(t *testing.T) { if err != nil { t.Error(err) } + + // The pods are configured to only be ready when the real database client + // successfully executes a simple query on the database. t.Log("Checking for ready", kind) err = tp.ExpectPodReady(ctx, selector, "all") if err != nil { From c1a15de92a373974327d59019d594ad51c35db18 Mon Sep 17 00:00:00 2001 From: "Jonathan Hess (he/him)" <103529393+hessjcg@users.noreply.github.com> Date: Tue, 21 Feb 2023 14:33:03 -0700 Subject: [PATCH 27/29] chore: bump go version in release job (#215) --- .github/workflows/release-please-updates.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-please-updates.yaml b/.github/workflows/release-please-updates.yaml index 449d4104..c1091854 100644 --- a/.github/workflows/release-please-updates.yaml +++ b/.github/workflows/release-please-updates.yaml @@ -25,7 +25,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' - name: Checkout code uses: actions/checkout@v3 with: From 5204cca618b6bb7588da302d82c6735389eb700f Mon Sep 17 00:00:00 2001 From: Jonathan Hess Date: Tue, 21 Feb 2023 14:37:34 -0700 Subject: [PATCH 28/29] chore: Release 0.3.0 Release-As: 0.3.0 From 53b97905ee71b2902fcff985a7a5d414164e7323 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:00:57 -0700 Subject: [PATCH 29/29] chore(main): Release 0.3.0 (#216) Features - Add new field RolloutStrategy control automatic rollout (#202) (090b88d) - Add new terraform project for e2e test resources (#181) (0140592) - Add script to run terraform with input validation. (#182) (857444a) - Add support for Unix sockets. 
(#205) (8177a35), closes #47 - Add telemetry settings to configure health check port (#210) (3ede42d) - Add the e2e test job for Cloud Build (#184) (dc2990c) - Automatic changes to workloads when an AuthProxyWorload is deleted (#200) (e11caed) - Automatically trigger pod rollout for appsv1 resources when AuthProxyWorkload changes. (#197) (3b0359b) - Separate terraform for project setup and permissions (#179) (8f43657) - Validate AuthProxyWorkload spec.selector field (#209) (98c460b) - Validate AuthProxyWorkload updates to prevent changes to the workload selector. (#211) (4304283) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> Co-authored-by: Release PR Generate Bot action release-please[bot] --- CHANGELOG.md | 22 ++++++++++++++++++++++ docs/quick-start.md | 2 +- installer/cloud-sql-proxy-operator.yaml | 2 +- installer/install.sh | 4 ++-- version.txt | 2 +- 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f0d5864..2d0d55e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Changelog +## [0.3.0](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/compare/v0.2.0...v0.3.0) (2023-02-21) + + +### Features + +* add new field RolloutStrategy control automatic rollout ([#202](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/202)) ([090b88d](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/090b88da2f3cbc00ca98bee7cdfbb4e50a6c4cb9)) +* Add new terraform project for e2e test resources ([#181](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/181)) ([0140592](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/0140592b3a19087cc5ee769b542ae461f3a5d1b4)) +* add script to run terraform with input validation. 
([#182](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/182)) ([857444a](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/857444ac09b8c1c5c9c3536ed1cab7367f778015)) +* Add support for Unix sockets. ([#205](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/205)) ([8177a35](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/8177a35be7988a01de682d806c05b9306537c3a1)), closes [#47](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/47) +* Add telemetry settings to configure health check port ([#210](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/210)) ([3ede42d](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/3ede42da9f502090d80b95970296f138484ef522)) +* add the e2e test job for Cloud Build ([#184](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/184)) ([dc2990c](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/dc2990c4483d216a31a6cafbf45ebba6936b8c6a)) +* automatic changes to workloads when an AuthProxyWorload is deleted ([#200](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/200)) ([e11caed](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/e11caed179f82ca3d24322d9f80a95174911bddd)) +* Automatically trigger pod rollout for appsv1 resources when AuthProxyWorkload changes. 
([#197](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/197)) ([3b0359b](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/3b0359b68b8d5c0dcd3e306102945c6e608ff095)) +* separate terraform for project setup and permissions ([#179](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/179)) ([8f43657](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/8f43657a6e039db0e3c8c57be56ec8d68ee503e9)) +* Validate AuthProxyWorkload spec.selector field ([#209](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/209)) ([98c460b](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/98c460bdd34dfa00815e664f60e38aa7327d92d4)) +* Validate AuthProxyWorkload updates to prevent changes to the workload selector. ([#211](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/issues/211)) ([4304283](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/4304283c1e85b079aab5cbf6c4c2dafb73ed654a)) + + +### Miscellaneous Chores + +* Release 0.3.0 ([5204cca](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/commit/5204cca618b6bb7588da302d82c6735389eb700f)) + ## [0.2.0](https://github.com/GoogleCloudPlatform/cloud-sql-proxy-operator/compare/v0.1.0...v0.2.0) (2023-01-18) diff --git a/docs/quick-start.md b/docs/quick-start.md index b5b7a619..6a7d0970 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -20,7 +20,7 @@ Run the following command to install the cloud sql proxy operator into your kubernetes cluster: ```shell -curl https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v0.2.0/install.sh | bash +curl https://storage.googleapis.com/cloud-sql-connectors/cloud-sql-proxy-operator/v0.3.0/install.sh | bash ``` This will use `helm` to install the `cert-manager` operator, a prerequisite. 
Then diff --git a/installer/cloud-sql-proxy-operator.yaml b/installer/cloud-sql-proxy-operator.yaml index 6252c02f..487d5402 100644 --- a/installer/cloud-sql-proxy-operator.yaml +++ b/installer/cloud-sql-proxy-operator.yaml @@ -1422,7 +1422,7 @@ spec: - --leader-elect command: - /manager - image: gcr.io/cloud-sql-connectors/cloud-sql-operator/cloud-sql-proxy-operator:0.3.0-dev + image: gcr.io/cloud-sql-connectors/cloud-sql-operator/cloud-sql-proxy-operator:0.3.0 livenessProbe: httpGet: path: /healthz diff --git a/installer/install.sh b/installer/install.sh index c91dd2d4..d664a8c8 100644 --- a/installer/install.sh +++ b/installer/install.sh @@ -16,8 +16,8 @@ set -euxo # exit 1 from the script when command fails -# If CSQL_OPERATOR_VERSION is not set, use the release version: v0.3.0-dev. -CSQL_OPERATOR_VERSION="${CSQL_OPERATOR_VERSION:-v0.3.0-dev}" +# If CSQL_OPERATOR_VERSION is not set, use the release version: v0.3.0. +CSQL_OPERATOR_VERSION="${CSQL_OPERATOR_VERSION:-v0.3.0}" # If CSQL_CERT_MANAGER_VERSION is not set, use the default: v1.9.1. CSQL_CERT_MANAGER_VERSION="${CSQL_CERT_MANAGER_VERSION:-v1.9.1}" diff --git a/version.txt b/version.txt index d5109100..0d91a54c 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.3.0-dev +0.3.0